Merge branches 'fixes', 'misc', 'mmci', 'unstable/dma-for-next' and 'sa11x0' into...
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9aa227c47e73ac6557c45add91128f..ef3c6072aa45345ae4594f22aebbe9a9ebc538f1 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,21 +5,13 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#include <asm/processor.h>
+#include <linux/prefetch.h>
 
 /*
  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
  * extensions, so when running on UP, we have to patch these instructions away.
  */
-#define ALT_SMP(smp, up)                                       \
-       "9998:  " smp "\n"                                      \
-       "       .pushsection \".alt.smp.init\", \"a\"\n"        \
-       "       .long   9998b\n"                                \
-       "       " up "\n"                                       \
-       "       .popsection\n"
-
 #ifdef CONFIG_THUMB2_KERNEL
-#define SEV            ALT_SMP("sev.w", "nop.w")
 /*
  * For Thumb-2, special care is needed to ensure that the conditional WFE
  * instruction really does assemble to exactly 4 bytes (as required by
  * the assembler won't change IT instructions which are explicitly present
  * in the input.
  */
-#define WFE(cond)      ALT_SMP(                \
+#define WFE(cond)      __ALT_SMP_ASM(          \
        "it " cond "\n\t"                       \
        "wfe" cond ".n",                        \
                                                \
        "nop.w"                                 \
 )
 #else
-#define SEV            ALT_SMP("sev", "nop")
-#define WFE(cond)      ALT_SMP("wfe" cond, "nop")
+#define WFE(cond)      __ALT_SMP_ASM("wfe" cond, "nop")
 #endif
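
To make the size constraint concrete, here is what the two WFE(cond) alternatives are expected to assemble to under Thumb-2. The exact byte counts are the whole point of the explicit "it" and the ".n"/".w" width suffixes (an illustrative listing, not part of the patch):

/*
 *   SMP side:  it   <cond>      @ 2 bytes (explicit IT, which the
 *                               @ assembler will not touch)
 *              wfe<cond>.n      @ 2 bytes (".n" forces narrow encoding)
 *   UP side:   nop.w            @ 4 bytes (".w" forces wide encoding)
 *
 * Both alternatives are exactly 4 bytes, so the UP fixup can overwrite
 * the SMP instruction in place.
 */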
 
+#define SEV            __ALT_SMP_ASM(WASM(sev), WASM(nop))
+
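
This hunk replaces the file-local ALT_SMP() macro with a shared __ALT_SMP_ASM() plus the WASM() helper. A sketch of their presumed definitions: __ALT_SMP_ASM() is the removed ALT_SMP() body relocated to a common header so other inline-asm users can patch SMP-only instructions too, and WASM() stringifies an instruction, appending ".w" under Thumb-2:

/* presumed shape, based on the ALT_SMP() body removed above */
#define __ALT_SMP_ASM(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

#ifdef CONFIG_THUMB2_KERNEL
#define WASM(instr)	#instr ".w"	/* wide encoding in Thumb-2 */
#else
#define WASM(instr)	#instr
#endif

With WASM() picking the instruction width, the single SEV definition above replaces the two per-ISA ALT_SMP("sev.w", "nop.w") / ALT_SMP("sev", "nop") variants removed earlier in the hunk. dsb_sev() below is the unlock-side counterpart: a barrier so the updated lock word is observable, then SEV to wake CPUs parked in WFE.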
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
@@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        u32 newval;
        arch_spinlock_t lockval;
 
+       prefetchw(&lock->slock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%3]\n"
 "      add     %1, %0, %4\n"
@@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
        unsigned long contended, res;
        u32 slock;
 
+       prefetchw(&lock->slock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%3]\n"
@@ -127,10 +122,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        dsb_sev();
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+       return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-       struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-       return tickets.owner != tickets.next;
+       return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
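
arch_spin_value_unlocked() deliberately takes the lock by value: a caller that has already read a snapshot of a larger structure can test the embedded lock without touching memory again, and arch_spin_is_locked() becomes the one-load special case via ACCESS_ONCE(). The motivating user is the lockless lockref cmpxchg fast path; a hedged sketch of that usage pattern (the struct, function, and type punning here are illustrative, not from this patch):

struct ref {
	arch_spinlock_t	lock;		/* 32 bits */
	int		count;		/* 32 bits */
};

static int ref_inc_unless_locked(struct ref *r)
{
	struct ref old, new;

	old = *(volatile struct ref *)r;	/* one snapshot of lock+count */
	if (!arch_spin_value_unlocked(old.lock))
		return 0;			/* held: caller takes the lock */

	new = old;
	new.count++;
	/* publish only if neither the lock word nor the count moved */
	return cmpxchg64((u64 *)r, *(u64 *)&old, *(u64 *)&new) == *(u64 *)&old;
}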
@@ -152,6 +151,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
 
+       prefetchw(&rw->lock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%1]\n"
 "      teq     %0, #0\n"
@@ -170,6 +170,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned long contended, res;
 
+       prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
@@ -203,7 +204,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)         ((x)->lock == 0)
+#define arch_write_can_lock(x)         (ACCESS_ONCE((x)->lock) == 0)
 
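
Wrapping the load in ACCESS_ONCE() forces the compiler to perform a real, fresh read each time the macro is evaluated; without it, a loop polling arch_write_can_lock() could legally hoist the load out of the loop and spin on a stale register. For reference, essentially the <linux/compiler.h> definition of this era:

#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

/* e.g. this poll now re-reads rw->lock on every iteration: */
while (!arch_write_can_lock(rw))
	cpu_relax();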
 /*
  * Read locks are a bit more hairy:
@@ -221,6 +222,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp, tmp2;
 
+       prefetchw(&rw->lock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%2]\n"
 "      adds    %0, %0, #1\n"
@@ -241,6 +243,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
        smp_mb();
 
+       prefetchw(&rw->lock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%2]\n"
 "      sub     %0, %0, #1\n"
@@ -259,6 +262,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned long contended, res;
 
+       prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
@@ -280,7 +284,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)          ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)          (ACCESS_ONCE((x)->lock) < 0x80000000)
 
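
Both can_lock macros test the same 32-bit encoding: 0 means free, 0x80000000 means a writer holds the lock, and 1..0x7fffffff counts readers (the read path's adds/bmi sequence works because a writer makes the word negative). A small self-contained model of that encoding (illustrative userspace C):

#include <assert.h>

#define WRITER	0x80000000u

int main(void)
{
	unsigned int lock;

	lock = 0;		/* free */
	assert(lock == 0);		/* write_can_lock: yes */
	assert(lock < WRITER);		/* read_can_lock:  yes */

	lock = 2;		/* two readers hold it */
	assert(!(lock == 0));		/* writers must wait */
	assert(lock < WRITER);		/* more readers may enter */

	lock = WRITER;		/* a writer holds it */
	assert(!(lock < WRITER));	/* readers see it "negative" */
	return 0;
}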
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)