本文共 6542 字,大约阅读时间需要 21 分钟。
函数arch_spin_lock()实现:
/*
 * arch_spin_lock() - acquire a ticket spinlock on arm64.
 *
 * Takes the next ticket with an atomic add, then spins (in WFE low-power
 * wait) until the owner field matches our ticket. The ARM64_LSE_ATOMIC_INSN
 * macro selects at runtime between the LL/SC (ldaxr/stxr) sequence and the
 * LSE atomic (ldadda) sequence.
 *
 * asm operands: %0 = lockval (old lock value), %1 = newval (scratch),
 * %2 = tmp (scratch / stxr status), %3 = the whole lock word (read-write),
 * %4 = lock->owner (memory input), %5 = ticket increment (1 << TICKET_SHIFT).
 * NOTE(review): the ror #16 / lsr #16 below imply TICKET_SHIFT == 16
 * (16-bit owner/next halves) — defined elsewhere, confirm against the header.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
" prfm pstl1strm, %3\n"
"1: ldaxr %w0, %3\n"
" add %w1, %w0, %w5\n"
" stxr %w2, %w1, %3\n"
" cbnz %w2, 1b\n",		/* retry if the exclusive store failed */
	/* LSE atomics */
" mov %w2, %w5\n"
" ldadda %w2, %w0, %3\n"	/* atomic add with acquire; old value -> %w0 */
	__nops(3)		/* pad so both alternatives are the same size */
	)
	/* Did we get the lock? (owner half == next half of the old value) */
" eor %w1, %w0, %w0, ror #16\n"
" cbz %w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
" sevl\n"
"2: wfe\n"			/* low-power wait for an event */
" ldaxrh %w2, %4\n"		/* exclusive load arms the monitor: unlock's store wakes us */
" eor %w1, %w2, %w0, lsr #16\n"
" cbnz %w1, 2b\n"		/* not our turn yet -> wait again */
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}
在理解上面操作之前,先看看ARMv8手册相关说明:
A PE can use the Wait for Event (WFE) mechanism to enter a low-power state, depending on the value of an Event Register for that PE. To enter the low-power state, the PE executes a Wait For Event instruction, WFE, and if the Event Register is clear, the PE can enter the low-power state. If the PE does enter the low-power state, it remains in that low-power state until it receives a WFE wake-up event. The architecture does not define the exact nature of the low-power state, except that the execution of a WFE instruction must not cause a loss of memory coherency. WFE mechanism behavior depends on the interaction of all of the following, that are described in the subsections that follow: • The Event Register for the PE. See subsection The Event Register on page D1-1529. • The Wait For Event instruction, WFE. See subsection The Wait For Event instruction on page D1-1529. • WFE wake-up events. See subsection WFE wake-up events in AArch64 state on page D1-1530. • The Send Event instructions, SEV and SEVL, that can cause WFE wake-up events. See subsection The Send Event instructions on page D1-1530.
也就是说,PE 可以通过执行 WFE 指令进入低功耗模式。WFE 机制基于每个 PE 的一个 Event Register(事件寄存器):PE 执行 WFE 指令时,如果事件寄存器已被清除,PE 便可以进入低功耗状态;之后当收到 WFE 唤醒事件(即事件寄存器被置位)时,PE 被唤醒。当然,这种唤醒是由处理器内部逻辑来完成的。
还需要注意的是,架构并没有定义 WFE 进入的低功耗状态的具体实现,唯一的要求是执行 WFE 指令不能导致内存一致性(memory coherency)的丢失,也就是要维护好 cache 和 memory 的一致性。
WFE依赖下面几点:
1. 一个Event Register寄存器。
2. 一个等待Event的指令,即WFE。
3. WFE所等待的唤醒事件,定义哪些事件可以唤醒。
4. 发送事件指令,即SEV和SEVL。这两个指令可以触发唤醒事件,进而让处于WFE状态的PE重新工作起来。
详细的内容读者可以参考上面给出的文档链接。
所以,我们可以看到函数arch_spin_lock()实现。其中有三个标号,代表了三个跳转。标号3代表获取了锁,函数执行到此后,简单设置后返回。
标号2到标号3是一个WFE等待循环,即处理唤醒事件:被唤醒后检查锁的 owner 是否轮到自己,如果没有唤醒事件或还未轮到自己,则继续处于WFE状态。注意标号2之前的SEVL指令,它向本 PE 发送一个本地事件,保证第一次执行WFE时不会因为错过独占加载之前已发生的 unlock 而一直睡眠。
需要注意区分:SEV 才是发给所有 PE 的处理器间唤醒操作,而 SEVL(Send Event Local)只向本 PE 发送事件。对这段 spin_lock 代码而言,真正唤醒等待者的事件来自持锁者在 unlock 中对锁的 store-release 写操作——它会清除等待者通过 ldaxrh 建立的独占监视器,从而产生唤醒事件。
/*
 * arch_spin_trylock() - try to acquire the ticket spinlock without waiting.
 *
 * Returns non-zero on success, 0 if the lock is currently held.
 * Only succeeds when owner == next (lock free); then bumps the next ticket.
 *
 * asm operands: %0 = lockval, %1 = tmp (0 on success), %2 = lock word,
 * %3 = ticket increment (1 << TICKET_SHIFT).
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" prfm pstl1strm, %2\n"
	"1: ldaxr %w0, %2\n"
	" eor %w1, %w0, %w0, ror #16\n"	/* owner != next -> lock held */
	" cbnz %w1, 2f\n"		/* bail out, %w1 != 0 signals failure */
	" add %w0, %w0, %3\n"
	" stxr %w1, %w0, %2\n"
	" cbnz %w1, 1b\n"		/* exclusive store failed -> retry */
	"2:",
	/* LSE atomics */
	" ldr %w0, %2\n"
	" eor %w1, %w0, %w0, ror #16\n"
	" cbnz %w1, 1f\n"
	" add %w1, %w0, %3\n"
	" casa %w0, %w1, %2\n"		/* compare-and-swap with acquire */
	" and %w1, %w1, #0xffff\n"	/* %w1 = 0 iff the cas actually won */
	" eor %w1, %w1, %w0, lsr #16\n"
	"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}
/*
 * arch_spin_unlock() - release the ticket spinlock.
 *
 * Increments the 16-bit owner field with release semantics (stlrh /
 * staddlh), handing the lock to the next ticket holder. The store also
 * clears any waiter's exclusive monitor, generating the wake-up event
 * that brings waiters out of WFE in arch_spin_lock(). Only the current
 * holder calls this, so a plain load + store is race-free in the LL/SC
 * variant.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" ldrh %w1, %0\n"
	" add %w1, %w1, #1\n"
	" stlrh %w1, %0",		/* store-release publishes the critical section */
	/* LSE atomics */
	" mov %w1, #1\n"
	" staddlh %w1, %0\n"		/* atomic halfword add with release */
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
/*
 * arch_write_lock() - acquire the rwlock for exclusive (write) access.
 *
 * Spins in WFE until the lock word reads 0 (no readers, no writer), then
 * stores 0x80000000 to claim it. asm operands: %0 = tmp, %1 = rw->lock,
 * %2 = 0x80000000 (writer bit).
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" sevl\n"			/* local event: don't sleep through a prior unlock */
	"1: wfe\n"
	"2: ldaxr %w0, %1\n"
	" cbnz %w0, 1b\n"		/* lock busy -> wait for an event */
	" stxr %w0, %w2, %1\n"
	" cbnz %w0, 2b\n"		/* lost the exclusive store -> re-check */
	__nops(1),
	/* LSE atomics */
	"1: mov %w0, wzr\n"
	"2: casa %w0, %w2, %1\n"	/* cas 0 -> writer bit, acquire semantics */
	" cbz %w0, 3f\n"
	" ldxr %w0, %1\n"		/* arm the monitor so unlock wakes us */
	" cbz %w0, 2b\n"
	" wfe\n"
	" b 1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}
/*
 * arch_write_trylock() - try to take the write lock without waiting.
 *
 * Returns non-zero on success, 0 if readers or a writer currently hold
 * the lock. Succeeds only when the lock word is 0, swapping in the
 * writer bit (0x80000000).
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1: ldaxr %w0, %1\n"
	" cbnz %w0, 2f\n"		/* lock not free -> fail with %w0 != 0 */
	" stxr %w0, %w2, %1\n"
	" cbnz %w0, 1b\n"		/* exclusive store failed -> retry */
	"2:",
	/* LSE atomics */
	" mov %w0, wzr\n"
	" casa %w0, %w2, %1\n"		/* %w0 stays 0 iff the cas succeeded */
	__nops(2))
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}
/*
 * arch_write_unlock() - release the write lock.
 *
 * The writer holds the lock exclusively, so releasing is a single
 * store-release of 0 (stlr, or swpl in the LSE variant) — no
 * read-modify-write is needed.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	" stlr wzr, %0",		/* LL/SC: plain store-release of zero */
	" swpl wzr, wzr, %0")		/* LSE: atomic swap with release */
	: "=Q" (rw->lock)
	:: "memory");
}
/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
/*
 * arch_read_lock() - acquire the rwlock for shared (read) access.
 *
 * Increments the reader count; bit 31 set (negative value) means a writer
 * holds the lock, in which case we WFE-wait and retry. asm operands:
 * %0 = tmp, %1 = tmp2, %2 = rw->lock.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	" sevl\n"			/* local event: first wfe falls straight through */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1: wfe\n"
	"2: ldaxr %w0, %2\n"
	" add %w0, %w0, #1\n"
	" tbnz %w0, #31, 1b\n"		/* writer bit set -> go back to waiting */
	" stxr %w1, %w0, %2\n"
	" cbnz %w1, 2b\n"		/* exclusive store failed -> reload */
	__nops(1),
	/* LSE atomics */
	"1: wfe\n"
	"2: ldxr %w0, %2\n"
	" adds %w1, %w0, #1\n"
	" tbnz %w1, #31, 1b\n"		/* writer holds the lock -> wait */
	" casa %w0, %w1, %2\n"		/* try to install the bumped count */
	" sbc %w0, %w1, %w0\n"		/* %w0 = 0 iff the cas saw the expected value */
	" cbnz %w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}
/*
 * arch_read_unlock() - drop one shared (read) hold on the rwlock.
 *
 * Atomically decrements the reader count with release semantics
 * (stlxr / staddl of -1). Other readers may still hold the lock.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1: ldxr %w0, %2\n"
	" sub %w0, %w0, #1\n"
	" stlxr %w1, %w0, %2\n"		/* store-release the decremented count */
	" cbnz %w1, 1b",		/* retry on exclusive-store failure */
	/* LSE atomics */
	" movn %w0, #0\n"		/* %w0 = -1 */
	" staddl %w0, %2\n"		/* atomic add of -1 with release */
	__nops(2))
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

/*
 * arch_read_trylock() - try to take a read hold without waiting.
 *
 * Returns non-zero on success, 0 if a writer currently holds the lock
 * (lock value negative after the increment).
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" mov %w1, #1\n"		/* preset failure in case we bail at 2: */
	"1: ldaxr %w0, %2\n"
	" add %w0, %w0, #1\n"
	" tbnz %w0, #31, 2f\n"		/* writer bit set -> fail */
	" stxr %w1, %w0, %2\n"
	" cbnz %w1, 1b\n"
	"2:",
	/* LSE atomics */
	" ldr %w0, %2\n"
	" adds %w1, %w0, #1\n"
	" tbnz %w1, #31, 1f\n"
	" casa %w0, %w1, %2\n"
	" sbc %w1, %w1, %w0\n"		/* %w1 = 0 iff the cas succeeded */
	__nops(1)
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}
转载地址:http://hgqti.baihongyu.com/