#ifndef __ASM_SPINLOCK_LNKGET_H
#define __ASM_SPINLOCK_LNKGET_H

/*
 * None of these asm statements clobber memory as LNKSET writes around
 * the cache so the memory it modifies cannot safely be read by any means
 * other than these accessors.
 */

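/*
 * Every accessor below follows the same basic shape, sketched here in
 * rough C for orientation only (this is not real code; "linked_store" is
 * a hypothetical name standing in for an LNKSETD-family store plus the
 * DEFR TXSTAT / ANDT / CMPT check that tells us whether the linked store
 * actually took effect):
 *
 *	do {
 *		val = value read from the lock word with LNKGETD;
 *		compute the new value, or decide to give up or spin;
 *	} while (a store was attempted && !linked_store(&lock_word, new));
 */
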
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	int ret;

	/* Bit 0 of the lock word is the "locked" flag. */
	asm volatile ("LNKGETD	%0, [%1]\n"
		      "TST	%0, #1\n"
		      "MOV	%0, #1\n"
		      "XORZ	%0, %0, %0\n"
		      : "=&d" (ret)
		      : "da" (&lock->lock)
		      : "cc");
	return ret;
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int tmp;

	/* Spin until bit 0 can be set atomically via LNKGETD/LNKSETDZ. */
	asm volatile ("1:	LNKGETD	%0, [%1]\n"
		      "	TST	%0, #1\n"
		      "	ADDZ	%0, %0, #1\n"
		      "	LNKSETDZ [%1], %0\n"
		      "	BNZ	1b\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	BNZ	1b\n"
		      : "=&d" (tmp)
		      : "da" (&lock->lock)
		      : "cc");

	smp_mb();
}

/* Returns 0 if failed to acquire lock */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp;

	asm volatile ("	LNKGETD	%0, [%1]\n"
		      "	TST	%0, #1\n"
		      "	ADDZ	%0, %0, #1\n"
		      "	LNKSETDZ [%1], %0\n"
		      "	BNZ	1f\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	MOV	%0, #1\n"
		      "1:	XORNZ	%0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&lock->lock)
		      : "cc");

	smp_mb();

	return tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	/* The lock is exclusively held, so a plain store of 0 releases it. */
	asm volatile ("	SETD	[%0], %1\n"
		      :
		      : "da" (&lock->lock), "da" (0)
		      : "memory");
}

/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

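/*
 * Rough C equivalent of arch_write_lock() below (a sketch of the logic
 * only; "linked_store" is the same hypothetical helper as above,
 * standing in for LNKSETDZ plus the TXSTAT success check):
 *
 *	do {
 *		old = rw->lock;					// LNKGETD
 *	} while (old != 0 ||					// busy
 *		 !linked_store(&rw->lock, old + 0x80000000));	// set bit 31
 */
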
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("1:	LNKGETD	%0, [%1]\n"
		      "	CMP	%0, #0\n"
		      "	ADD	%0, %0, %2\n"
		      "	LNKSETDZ [%1], %0\n"
		      "	BNZ	1b\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	BNZ	1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("	LNKGETD	%0, [%1]\n"
		      "	CMP	%0, #0\n"
		      "	ADD	%0, %0, %2\n"
		      "	LNKSETDZ [%1], %0\n"
		      "	BNZ	1f\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	MOV	%0, #1\n"
		      "1:	XORNZ	%0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");

	smp_mb();

	return tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	asm volatile ("	SETD	[%0], %1\n"
		      :
		      : "da" (&rw->lock), "da" (0)
		      : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	int ret;

	asm volatile ("LNKGETD	%0, [%1]\n"
		      "CMP	%0, #0\n"
		      "MOV	%0, #1\n"
		      "XORNZ	%0, %0, %0\n"
		      : "=&d" (ret)
		      : "da" (&rw->lock)
		      : "cc");
	return ret;
}

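/*
 * In plain C the test above amounts to the following (sketch only; the
 * load still has to be an LNKGETD for the reason given at the top of
 * this file):
 *
 *	return rw->lock == 0;	// free only when no readers and no writer
 */
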
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.  (A rough C sketch of the acquire loop follows below.)
 */

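/*
 * Rough C equivalent of the acquire loop in arch_read_lock() (a sketch
 * only; "linked_store" is the same hypothetical helper as above, here
 * standing in for LNKSETDPL plus the TXSTAT success check, and the lock
 * word is treated as a signed int):
 *
 *	do {
 *		tmp = rw->lock + 1;		// LNKGETD + ADDS
 *	} while (tmp < 0 ||			// bit 31 set: writer active
 *		 !linked_store(&rw->lock, tmp));
 */
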
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("1:	LNKGETD	%0, [%1]\n"
		      "	ADDS	%0, %0, #1\n"
		      "	LNKSETDPL [%1], %0\n"
		      "	BMI	1b\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	BNZ	1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int tmp;

	smp_mb();

	/* Decrement the reader count, retrying if the linked store fails. */
	asm volatile ("1:	LNKGETD	%0, [%1]\n"
		      "	SUB	%0, %0, #1\n"
		      "	LNKSETD	[%1], %0\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	BNZ	1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("	LNKGETD	%0, [%1]\n"
		      "	ADDS	%0, %0, #1\n"
		      "	LNKSETDPL [%1], %0\n"
		      "	BMI	1f\n"
		      "	DEFR	%0, TXSTAT\n"
		      "	ANDT	%0, %0, #HI(0x3f000000)\n"
		      "	CMPT	%0, #HI(0x02000000)\n"
		      "	MOV	%0, #1\n"
		      "	BZ	2f\n"
		      "1:	MOV	%0, #0\n"
		      "2:\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc");

	smp_mb();

	return tmp;
}

/* read_can_lock - would read_trylock() succeed? */
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("LNKGETD	%0, [%1]\n"
		      "CMP	%0, %2\n"
		      "MOV	%0, #1\n"
		      "XORZ	%0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");
	return tmp;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_LNKGET_H */