git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'x86/spinlocks' into auto-latest
author Ingo Molnar <mingo@elte.hu>
Tue, 9 Aug 2011 09:55:36 +0000 (11:55 +0200)
committer Ingo Molnar <mingo@elte.hu>
Tue, 9 Aug 2011 09:55:36 +0000 (11:55 +0200)
Conflicts:
arch/x86/include/asm/cmpxchg_32.h
arch/x86/include/asm/cmpxchg_64.h

arch/x86/include/asm/cmpxchg_32.h
arch/x86/include/asm/cmpxchg_64.h
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/spinlock_types.h

index 3deb7250624c268c3b8ae819f6ef2c8081abc8c4,30f0318bccdd3df69f4bd8140d220fe59f0607ac..d4b09d9a98efd962bf442554ab854d34d5ac4dea
@@@ -280,52 -280,25 +280,73 @@@ static inline unsigned long cmpxchg_386
  
  #endif
  
+ #define xadd(ptr, inc)                                                        \
+       do {                                                            \
+               switch (sizeof(*(ptr))) {                               \
+               case 1:                                                 \
+                       asm volatile (LOCK_PREFIX "xaddb %b0, %1\n"     \
+                                     : "+r" (inc), "+m" (*(ptr))       \
+                                     : : "memory", "cc");              \
+                       break;                                          \
+               case 2:                                                 \
+                       asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"     \
+                                     : "+r" (inc), "+m" (*(ptr))       \
+                                     : : "memory", "cc");              \
+                       break;                                          \
+               case 4:                                                 \
+                       asm volatile (LOCK_PREFIX "xaddl %0, %1\n"      \
+                                     : "+r" (inc), "+m" (*(ptr))       \
+                                     : : "memory", "cc");              \
+                       break;                                          \
+               }                                                       \
+       } while(0)
 +#define cmpxchg8b(ptr, o1, o2, n1, n2)                                \
 +({                                                            \
 +      char __ret;                                             \
 +      __typeof__(o2) __dummy;                                 \
 +      __typeof__(*(ptr)) __old1 = (o1);                       \
 +      __typeof__(o2) __old2 = (o2);                           \
 +      __typeof__(*(ptr)) __new1 = (n1);                       \
 +      __typeof__(o2) __new2 = (n2);                           \
 +      asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1"        \
 +                     : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)\
 +                     : "a" (__old1), "d"(__old2),             \
 +                       "b" (__new1), "c" (__new2)             \
 +                     : "memory");                             \
 +      __ret; })
 +
 +
 +#define cmpxchg8b_local(ptr, o1, o2, n1, n2)                  \
 +({                                                            \
 +      char __ret;                                             \
 +      __typeof__(o2) __dummy;                                 \
 +      __typeof__(*(ptr)) __old1 = (o1);                       \
 +      __typeof__(o2) __old2 = (o2);                           \
 +      __typeof__(*(ptr)) __new1 = (n1);                       \
 +      __typeof__(o2) __new2 = (n2);                           \
 +      asm volatile("cmpxchg8b %2; setz %1"                    \
 +                     : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)\
 +                     : "a" (__old1), "d"(__old2),             \
 +                       "b" (__new1), "c" (__new2)             \
 +                     : "memory");                             \
 +      __ret; })
 +
 +
 +#define cmpxchg_double(ptr, o1, o2, n1, n2)                           \
 +({                                                                    \
 +      BUILD_BUG_ON(sizeof(*(ptr)) != 4);                              \
 +      VM_BUG_ON((unsigned long)(ptr) % 8);                            \
 +      cmpxchg8b((ptr), (o1), (o2), (n1), (n2));                       \
 +})
 +
 +#define cmpxchg_double_local(ptr, o1, o2, n1, n2)                     \
 +({                                                                    \
 +      BUILD_BUG_ON(sizeof(*(ptr)) != 4);                              \
 +      VM_BUG_ON((unsigned long)(ptr) % 8);                            \
 +      cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2));                 \
 +})
 +
 +#define system_has_cmpxchg_double() cpu_has_cx8
 +
  #endif /* _ASM_X86_CMPXCHG_32_H */
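
For context, the new 32-bit primitives in one sketch: xadd() wraps the XADD
instruction (atomically add to memory, returning the old contents in the
register operand), and cmpxchg_double() compares and swaps two adjacent
32-bit words via CMPXCHG8B. A minimal, hedged usage sketch follows; the
struct and function names are illustrative, not part of this merge, and it
assumes the header above is in scope:

/* Hypothetical pair of adjacent 32-bit words; the 8-byte alignment
 * satisfies the VM_BUG_ON() in cmpxchg_double() above. */
struct u32_pair {
	u32 first;
	u32 second;
} __attribute__((aligned(8)));

static bool pair_update(struct u32_pair *p, u32 o1, u32 o2, u32 n1, u32 n2)
{
	/* CMPXCHG8B is missing on i386/i486, so check CX8 first. */
	if (!system_has_cmpxchg_double())
		return false;	/* caller must fall back to a lock */

	/* Non-zero iff both words still held (o1, o2) and were
	 * replaced by (n1, n2) in a single atomic step. */
	return cmpxchg_double(&p->first, o1, o2, n1, n2);
}

static u32 counter_fetch_inc(u32 *counter)
{
	u32 inc = 1;

	xadd(counter, inc);	/* 'inc' now holds the pre-increment value */
	return inc;
}
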
index 7cf5c0a2443405532b274e872b7e2063ab57de8e,62da1ffc9a310b160b97c94c76a9f514d0c60eff..8c5c836eccd6d1e43f2e6303809809f85f832e8a
@@@ -151,49 -151,30 +151,75 @@@ extern void __cmpxchg_wrong_size(void)
        cmpxchg_local((ptr), (o), (n));                                 \
  })
  
+ #define xadd(ptr, inc)                                                        \
+       do {                                                            \
+               switch (sizeof(*(ptr))) {                               \
+               case 1:                                                 \
+                       asm volatile (LOCK_PREFIX "xaddb %b0, %1\n"     \
+                                     : "+r" (inc), "+m" (*(ptr))       \
+                                     : : "memory", "cc");              \
+                       break;                                          \
+               case 2:                                                 \
+                       asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"     \
+                                     : "+r" (inc), "+m" (*(ptr))       \
+                                     : : "memory", "cc");              \
+                       break;                                          \
+               case 4:                                                 \
+                       asm volatile (LOCK_PREFIX "xaddl %0, %1\n"      \
+                                     : "+r" (inc), "+m" (*(ptr))       \
+                                     : : "memory", "cc");              \
+                       break;                                          \
+               case 8:                                                 \
+                       asm volatile (LOCK_PREFIX "xaddq %q0, %1\n"     \
+                                     : "+r" (inc), "+m" (*(ptr))       \
+                                     : : "memory", "cc");              \
+                       break;                                          \
+               }                                                       \
+       } while(0)
 +#define cmpxchg16b(ptr, o1, o2, n1, n2)                               \
 +({                                                            \
 +      char __ret;                                             \
 +      __typeof__(o2) __junk;                                  \
 +      __typeof__(*(ptr)) __old1 = (o1);                       \
 +      __typeof__(o2) __old2 = (o2);                           \
 +      __typeof__(*(ptr)) __new1 = (n1);                       \
 +      __typeof__(o2) __new2 = (n2);                           \
 +      asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1"        \
 +                     : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
 +                     : "b"(__new1), "c"(__new2),              \
 +                       "a"(__old1), "d"(__old2));             \
 +      __ret; })
 +
 +
 +#define cmpxchg16b_local(ptr, o1, o2, n1, n2)                 \
 +({                                                            \
 +      char __ret;                                             \
 +      __typeof__(o2) __junk;                                  \
 +      __typeof__(*(ptr)) __old1 = (o1);                       \
 +      __typeof__(o2) __old2 = (o2);                           \
 +      __typeof__(*(ptr)) __new1 = (n1);                       \
 +      __typeof__(o2) __new2 = (n2);                           \
 +      asm volatile("cmpxchg16b %2;setz %1"                    \
 +                     : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
 +                     : "b"(__new1), "c"(__new2),              \
 +                       "a"(__old1), "d"(__old2));             \
 +      __ret; })
 +
 +#define cmpxchg_double(ptr, o1, o2, n1, n2)                           \
 +({                                                                    \
 +      BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
 +      VM_BUG_ON((unsigned long)(ptr) % 16);                           \
 +      cmpxchg16b((ptr), (o1), (o2), (n1), (n2));                      \
 +})
 +
 +#define cmpxchg_double_local(ptr, o1, o2, n1, n2)                     \
 +({                                                                    \
 +      BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
 +      VM_BUG_ON((unsigned long)(ptr) % 16);                           \
 +      cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2));                \
 +})
 +
 +#define system_has_cmpxchg_double() cpu_has_cx16
 +
  #endif /* _ASM_X86_CMPXCHG_64_H */
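
The 64-bit side is the same pattern widened: cmpxchg16b() needs a pair of
64-bit words aligned to 16 bytes, and because early x86-64 CPUs shipped
without CMPXCHG16B, system_has_cmpxchg_double() gates on cpu_has_cx16 at
runtime. Another hedged sketch with illustrative names (a classic use is a
value updated together with a generation count to close ABA windows):

/* Hypothetical 16-byte-aligned descriptor. */
struct u64_pair {
	u64 val;
	u64 gen;
} __attribute__((aligned(16)));

static bool pair_advance(struct u64_pair *d, u64 old_val, u64 old_gen,
			 u64 new_val)
{
	if (!system_has_cmpxchg_double())
		return false;	/* no CX16: caller takes a slow path */

	/* Swap both words only if neither changed; bumping 'gen'
	 * lets concurrent readers detect any intervening update. */
	return cmpxchg_double(&d->val, old_val, old_gen,
			      new_val, old_gen + 1);
}
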
Simple merge (arch/x86/include/asm/spinlock.h)
index 7c7a486fcb6811e68dc140085061e15df8bbd569,72e154eb939d604a0715cdff079d7d88af126843..8ebd5df7451e28b88a8a155bf3ff7dcb081ddb35
@@@ -5,12 -5,34 +5,30 @@@
  # error "please don't include this file directly"
  #endif
  
+ #include <linux/types.h>
+ #if (CONFIG_NR_CPUS < 256)
+ typedef u8  __ticket_t;
+ typedef u16 __ticketpair_t;
+ #else
+ typedef u16 __ticket_t;
+ typedef u32 __ticketpair_t;
+ #endif
+ #define TICKET_SHIFT  (sizeof(__ticket_t) * 8)
+ #define TICKET_MASK   ((__ticket_t)((1 << TICKET_SHIFT) - 1))
  typedef struct arch_spinlock {
-       unsigned int slock;
+       union {
+               __ticketpair_t head_tail;
+               struct __raw_tickets {
+                       __ticket_t head, tail;
+               } tickets;
+       };
  } arch_spinlock_t;
  
- #define __ARCH_SPIN_LOCK_UNLOCKED     { 0 }
+ #define __ARCH_SPIN_LOCK_UNLOCKED     { { 0 } }
  
 -typedef struct {
 -      unsigned int lock;
 -} arch_rwlock_t;
 -
 -#define __ARCH_RW_LOCK_UNLOCKED               { RW_LOCK_BIAS }
 +#include <asm/rwlock.h>
  
  #endif /* _ASM_X86_SPINLOCK_TYPES_H */
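
The spinlock.h counterpart ("Simple merge" above) builds ticket locks on
this layout: a locker xadd()s one ticket into the tail half and spins until
head catches up, and unlock advances head. A hedged sketch of that protocol
in terms of the types defined here; the bodies are illustrative, not the
merged source:

/* Illustrative only: shows why head and tail share one word. */
static inline void ticket_lock(arch_spinlock_t *lock)
{
	__ticketpair_t inc = 1 << TICKET_SHIFT;	/* +1 in the tail half */
	__ticket_t me;

	xadd(&lock->head_tail, inc);	/* 'inc' now holds the old value */
	me = inc >> TICKET_SHIFT;	/* our ticket is the old tail */

	while (ACCESS_ONCE(lock->tickets.head) != me)
		cpu_relax();		/* an earlier ticket holds the lock */
}

static inline void ticket_unlock(arch_spinlock_t *lock)
{
	/* Hand off to the next waiter; the real code does this with a
	 * single atomic increment on the head half. */
	lock->tickets.head++;
}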