git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Revert "Merge branch 'x86/spinlocks' into auto-latest"
author Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 1 Sep 2011 02:07:58 +0000 (12:07 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 1 Sep 2011 02:07:58 +0000 (12:07 +1000)
This reverts commit 6f8fa39c81f12d98540598db42a1eaff65bba0ce, reversing
changes made to 4977f9bf2d9b511d36199ab3451a2592d6bc3793.

arch/x86/include/asm/cmpxchg_32.h
arch/x86/include/asm/cmpxchg_64.h
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/spinlock_types.h

index d4b09d9a98efd962bf442554ab854d34d5ac4dea..3deb7250624c268c3b8ae819f6ef2c8081abc8c4 100644 (file)
@@ -280,27 +280,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 
 #endif
 
-#define xadd(ptr, inc)                                                 \
-       do {                                                            \
-               switch (sizeof(*(ptr))) {                               \
-               case 1:                                                 \
-                       asm volatile (LOCK_PREFIX "xaddb %b0, %1\n"     \
-                                     : "+r" (inc), "+m" (*(ptr))       \
-                                     : : "memory", "cc");              \
-                       break;                                          \
-               case 2:                                                 \
-                       asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"     \
-                                     : "+r" (inc), "+m" (*(ptr))       \
-                                     : : "memory", "cc");              \
-                       break;                                          \
-               case 4:                                                 \
-                       asm volatile (LOCK_PREFIX "xaddl %0, %1\n"      \
-                                     : "+r" (inc), "+m" (*(ptr))       \
-                                     : : "memory", "cc");              \
-                       break;                                          \
-               }                                                       \
-       } while(0)
-
 #define cmpxchg8b(ptr, o1, o2, n1, n2)                         \
 ({                                                             \
        char __ret;                                             \
index 8c5c836eccd6d1e43f2e6303809809f85f832e8a..7cf5c0a2443405532b274e872b7e2063ab57de8e 100644 (file)
@@ -151,32 +151,6 @@ extern void __cmpxchg_wrong_size(void);
        cmpxchg_local((ptr), (o), (n));                                 \
 })
 
-#define xadd(ptr, inc)                                                 \
-       do {                                                            \
-               switch (sizeof(*(ptr))) {                               \
-               case 1:                                                 \
-                       asm volatile (LOCK_PREFIX "xaddb %b0, %1\n"     \
-                                     : "+r" (inc), "+m" (*(ptr))       \
-                                     : : "memory", "cc");              \
-                       break;                                          \
-               case 2:                                                 \
-                       asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"     \
-                                     : "+r" (inc), "+m" (*(ptr))       \
-                                     : : "memory", "cc");              \
-                       break;                                          \
-               case 4:                                                 \
-                       asm volatile (LOCK_PREFIX "xaddl %0, %1\n"      \
-                                     : "+r" (inc), "+m" (*(ptr))       \
-                                     : : "memory", "cc");              \
-                       break;                                          \
-               case 8:                                                 \
-                       asm volatile (LOCK_PREFIX "xaddq %q0, %1\n"     \
-                                     : "+r" (inc), "+m" (*(ptr))       \
-                                     : : "memory", "cc");              \
-                       break;                                          \
-               }                                                       \
-       } while(0)
-
 #define cmpxchg16b(ptr, o1, o2, n1, n2)                                \
 ({                                                             \
        char __ret;                                             \
index 7edca0d03c450d432298327e4db72a221422f45d..ee67edf86fdd98929998276ad8e5ab02c93c333a 100644 (file)
  * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
  * (PPro errata 66, 92)
  */
-static __always_inline void __ticket_unlock_release(struct arch_spinlock *lock)
-{
-       if (sizeof(lock->tickets.head) == sizeof(u8))
-               asm volatile(LOCK_PREFIX "incb %0"
-                            : "+m" (lock->tickets.head) : : "memory");
-       else
-               asm volatile(LOCK_PREFIX "incw %0"
-                            : "+m" (lock->tickets.head) : : "memory");
-
-}
+# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
 #else
-static __always_inline void __ticket_unlock_release(struct arch_spinlock *lock)
-{
-       lock->tickets.head++;
-}
+# define UNLOCK_LOCK_PREFIX
 #endif
 
 /*
@@ -66,63 +54,121 @@ static __always_inline void __ticket_unlock_release(struct arch_spinlock *lock)
  * save some instructions and make the code more elegant. There really isn't
  * much between them in performance though, especially as locks are out of line.
  */
-static __always_inline struct __raw_tickets __ticket_spin_claim(struct arch_spinlock *lock)
-{
-       register struct __raw_tickets tickets = { .tail = 1 };
-
-       xadd(&lock->tickets, tickets);
+#if (NR_CPUS < 256)
+#define TICKET_SHIFT 8
 
-       return tickets;
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+{
+       short inc = 0x0100;
+
+       asm volatile (
+               LOCK_PREFIX "xaddw %w0, %1\n"
+               "1:\t"
+               "cmpb %h0, %b0\n\t"
+               "je 2f\n\t"
+               "rep ; nop\n\t"
+               "movb %1, %b0\n\t"
+               /* don't need lfence here, because loads are in-order */
+               "jmp 1b\n"
+               "2:"
+               : "+Q" (inc), "+m" (lock->slock)
+               :
+               : "memory", "cc");
 }
 
-static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-       register struct __raw_tickets inc;
+       int tmp, new;
+
+       asm volatile("movzwl %2, %0\n\t"
+                    "cmpb %h0,%b0\n\t"
+                    "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
+                    "jne 1f\n\t"
+                    LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
+                    "1:"
+                    "sete %b1\n\t"
+                    "movzbl %b1,%0\n\t"
+                    : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
+                    :
+                    : "memory", "cc");
+
+       return tmp;
+}
 
-       inc = __ticket_spin_claim(lock);
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+{
+       asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
+                    : "+m" (lock->slock)
+                    :
+                    : "memory", "cc");
+}
+#else
+#define TICKET_SHIFT 16
 
-       for (;;) {
-               if (inc.head == inc.tail)
-                       goto out;
-               cpu_relax();
-               inc.head = ACCESS_ONCE(lock->tickets.head);
-       }
-out:   barrier();              /* make sure nothing creeps before the lock is taken */
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+{
+       int inc = 0x00010000;
+       int tmp;
+
+       asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
+                    "movzwl %w0, %2\n\t"
+                    "shrl $16, %0\n\t"
+                    "1:\t"
+                    "cmpl %0, %2\n\t"
+                    "je 2f\n\t"
+                    "rep ; nop\n\t"
+                    "movzwl %1, %2\n\t"
+                    /* don't need lfence here, because loads are in-order */
+                    "jmp 1b\n"
+                    "2:"
+                    : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
+                    :
+                    : "memory", "cc");
 }
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-       arch_spinlock_t old, new;
-
-       old.tickets = ACCESS_ONCE(lock->tickets);
-       if (old.tickets.head != old.tickets.tail)
-               return 0;
-
-       new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
-
-       /* cmpxchg is a full barrier, so nothing can move before it */
-       return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
+       int tmp;
+       int new;
+
+       asm volatile("movl %2,%0\n\t"
+                    "movl %0,%1\n\t"
+                    "roll $16, %0\n\t"
+                    "cmpl %0,%1\n\t"
+                    "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
+                    "jne 1f\n\t"
+                    LOCK_PREFIX "cmpxchgl %1,%2\n\t"
+                    "1:"
+                    "sete %b1\n\t"
+                    "movzbl %b1,%0\n\t"
+                    : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
+                    :
+                    : "memory", "cc");
+
+       return tmp;
 }
 
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-       barrier();              /* prevent reordering out of locked region */
-       __ticket_unlock_release(lock);
-       barrier();              /* prevent reordering into locked region */
+       asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
+                    : "+m" (lock->slock)
+                    :
+                    : "memory", "cc");
 }
+#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
-       struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+       int tmp = ACCESS_ONCE(lock->slock);
 
-       return !!(tmp.tail ^ tmp.head);
+       return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
-       struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+       int tmp = ACCESS_ONCE(lock->slock);
 
-       return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
+       return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
index 8ebd5df7451e28b88a8a155bf3ff7dcb081ddb35..7c7a486fcb6811e68dc140085061e15df8bbd569 100644 (file)
@@ -5,29 +5,11 @@
 # error "please don't include this file directly"
 #endif
 
-#include <linux/types.h>
-
-#if (CONFIG_NR_CPUS < 256)
-typedef u8  __ticket_t;
-typedef u16 __ticketpair_t;
-#else
-typedef u16 __ticket_t;
-typedef u32 __ticketpair_t;
-#endif
-
-#define TICKET_SHIFT   (sizeof(__ticket_t) * 8)
-#define TICKET_MASK    ((__ticket_t)((1 << TICKET_SHIFT) - 1))
-
 typedef struct arch_spinlock {
-       union {
-               __ticketpair_t head_tail;
-               struct __raw_tickets {
-                       __ticket_t head, tail;
-               } tickets;
-       };
+       unsigned int slock;
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED      { { 0 } }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 #include <asm/rwlock.h>