#endif
+ /*
+  * Atomically add @inc into *@ptr with LOCK XADD.  XADD exchanges as it
+  * adds, so @inc ("+r", an in/out operand) is left holding the previous
+  * value of *@ptr.  Handles 1-, 2- and 4-byte operands only (32-bit
+  * build: no 8-byte case).  The "memory" clobber makes this a full
+  * compiler barrier; @inc must be a modifiable lvalue.
+  */
+ #define xadd(ptr, inc) \
+ do { \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ asm volatile (LOCK_PREFIX "xaddb %b0, %1\n" \
+ : "+r" (inc), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case 2: \
+ asm volatile (LOCK_PREFIX "xaddw %w0, %1\n" \
+ : "+r" (inc), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case 4: \
+ asm volatile (LOCK_PREFIX "xaddl %0, %1\n" \
+ : "+r" (inc), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ } \
+ } while(0)
+
+/*
+ * Two-word (2 x 32-bit) compare-and-exchange via LOCK CMPXCHG8B.
+ * Expected value: @o1 (low word, EAX) / @o2 (high word, EDX);
+ * replacement: @n1 (EBX) / @n2 (ECX).  Evaluates to non-zero iff the
+ * exchange took place (SETZ of ZF into __ret).  __dummy only absorbs
+ * EDX, which the instruction rewrites when the comparison fails.
+ */
+#define cmpxchg8b(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __dummy; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1" \
+ : "=d"(__dummy), "=a" (__ret), "+m" (*(ptr))\
+ : "a" (__old1), "d"(__old2), \
+ "b" (__new1), "c" (__new2) \
+ : "memory"); \
+ __ret; })
+
+
+/*
+ * Non-LOCKed variant of cmpxchg8b() for CPU-local data: identical
+ * operands and result, but without the LOCK prefix, so it must only be
+ * used on memory that other CPUs cannot touch concurrently.
+ * (Expected: @o1/EAX low, @o2/EDX high; new: @n1/EBX, @n2/ECX;
+ * evaluates to non-zero iff the exchange happened.)
+ */
+#define cmpxchg8b_local(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __dummy; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile("cmpxchg8b %2; setz %1" \
+ : "=d"(__dummy), "=a"(__ret), "+m" (*(ptr))\
+ : "a" (__old1), "d"(__old2), \
+ "b" (__new1), "c" (__new2) \
+ : "memory"); \
+ __ret; })
+
+
+/*
+ * Generic two-word cmpxchg entry point (32-bit build): each word is
+ * 4 bytes and the pair must be naturally aligned to 8 bytes, as
+ * CMPXCHG8B requires.
+ */
+#define cmpxchg_double(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ VM_BUG_ON((unsigned long)(ptr) % 8); \
+ cmpxchg8b((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+/*
+ * CPU-local two-word cmpxchg (32-bit build).  Dispatches to the
+ * 8-byte CMPXCHG8B local primitive: CMPXCHG16B is a 64-bit-only
+ * instruction and cmpxchg16b_local() is not defined on i386.
+ */
+#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ VM_BUG_ON((unsigned long)(ptr) % 8); \
+ cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+/* CMPXCHG8B support (CPUID CX8 flag) gates cmpxchg_double() on 32-bit. */
+#define system_has_cmpxchg_double() cpu_has_cx8
+
#endif /* _ASM_X86_CMPXCHG_32_H */
cmpxchg_local((ptr), (o), (n)); \
})
+ /*
+  * 64-bit build variant of xadd(): atomically add @inc into *@ptr with
+  * LOCK XADD, leaving @inc ("+r", in/out) holding the previous value of
+  * *@ptr.  Same as the 32-bit version plus an 8-byte case.  "memory"
+  * clobber = full compiler barrier; @inc must be a modifiable lvalue.
+  */
+ #define xadd(ptr, inc) \
+ do { \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ asm volatile (LOCK_PREFIX "xaddb %b0, %1\n" \
+ : "+r" (inc), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case 2: \
+ asm volatile (LOCK_PREFIX "xaddw %w0, %1\n" \
+ : "+r" (inc), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case 4: \
+ asm volatile (LOCK_PREFIX "xaddl %0, %1\n" \
+ : "+r" (inc), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case 8: \
+ asm volatile (LOCK_PREFIX "xaddq %q0, %1\n" \
+ : "+r" (inc), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ } \
+ } while(0)
+
+/*
+ * Two-word (2 x 64-bit) compare-and-exchange via LOCK CMPXCHG16B.
+ * Expected value: @o1 (RAX) / @o2 (RDX); replacement: @n1 (RBX) /
+ * @n2 (RCX).  Evaluates to non-zero iff the exchange happened.
+ * The "memory" clobber is required: "+m" (*(ptr)) only describes the
+ * first 8 bytes while the instruction accesses all 16, and the 32-bit
+ * cmpxchg8b() counterpart already acts as a compiler barrier.
+ */
+#define cmpxchg16b(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __junk; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1" \
+ : "=d"(__junk), "=a"(__ret), "+m" (*(ptr)) \
+ : "b"(__new1), "c"(__new2), \
+ "a"(__old1), "d"(__old2) \
+ : "memory"); \
+ __ret; })
+
+
+/*
+ * Non-LOCKed variant of cmpxchg16b() for CPU-local data: same operands
+ * and result, no LOCK prefix, so only safe on memory other CPUs cannot
+ * touch concurrently.  "memory" clobber added for the same reason as
+ * cmpxchg16b(): the "+m" constraint covers only the first 8 of the 16
+ * bytes the instruction accesses.
+ */
+#define cmpxchg16b_local(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __junk; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile("cmpxchg16b %2;setz %1" \
+ : "=d"(__junk), "=a"(__ret), "+m" (*(ptr)) \
+ : "b"(__new1), "c"(__new2), \
+ "a"(__old1), "d"(__old2) \
+ : "memory"); \
+ __ret; })
+
+/*
+ * Generic two-word cmpxchg entry point (64-bit build): each word is
+ * 8 bytes and the pair must be naturally aligned to 16 bytes, as
+ * CMPXCHG16B requires.
+ */
+#define cmpxchg_double(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ VM_BUG_ON((unsigned long)(ptr) % 16); \
+ cmpxchg16b((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+/*
+ * CPU-local two-word cmpxchg (64-bit build): same size/alignment
+ * checks as cmpxchg_double(), dispatching to the non-LOCKed
+ * CMPXCHG16B primitive.
+ */
+#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ VM_BUG_ON((unsigned long)(ptr) % 16); \
+ cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+/* CMPXCHG16B support (CPUID CX16 flag) gates cmpxchg_double() on 64-bit. */
+#define system_has_cmpxchg_double() cpu_has_cx16
+
#endif /* _ASM_X86_CMPXCHG_64_H */
# error "please don't include this file directly"
#endif
+ #include <linux/types.h>
+
+ /*
+  * Ticket-lock storage sizing: a single ticket must be able to count
+  * every CPU, so with fewer than 256 possible CPUs an 8-bit ticket
+  * (16-bit head/tail pair) suffices; otherwise use 16-bit tickets in a
+  * 32-bit pair.  TICKET_SHIFT/TICKET_MASK give one ticket's bit width
+  * and value mask.
+  */
+ #if (CONFIG_NR_CPUS < 256)
+ typedef u8 __ticket_t;
+ typedef u16 __ticketpair_t;
+ #else
+ typedef u16 __ticket_t;
+ typedef u32 __ticketpair_t;
+ #endif
+
+ #define TICKET_SHIFT (sizeof(__ticket_t) * 8)
+ #define TICKET_MASK ((__ticket_t)((1 << TICKET_SHIFT) - 1))
+
typedef struct arch_spinlock {
- unsigned int slock;
+ union {
+ __ticketpair_t head_tail; /* both tickets viewed as one word, so they can be updated atomically together */
+ struct __raw_tickets {
+ __ticket_t head, tail; /* NOTE(review): ticket pair — presumably head = "now serving", tail = next ticket to hand out; confirm against the lock/unlock code */
+ } tickets;
+ };
} arch_spinlock_t;
- #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+ #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } /* extra braces: initializer now targets the anonymous union's first member */
-typedef struct {
- unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#include <asm/rwlock.h>
#endif /* _ASM_X86_SPINLOCK_TYPES_H */