Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 037351022f5483c99a3f0a42dd4646f1eb0a3055..89ed2f6ae2f76accf15ad16810c6aceb4a1854ae 100644 (file)
@@ -70,9 +70,9 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
 
 /**
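Two things change in these test/compare hunks: the return type goes from int to bool, and the condition code is now passed as a bare token (e, s) rather than a quoted string. A bare token can be stringized (#cc) or token-pasted (##cc) by helper macros, which a string literal cannot, and that is what the CC_SET()/CC_OUT() flag-output helpers rely on. Below is a minimal, purely illustrative sketch of the setCC-based idea; it is not the kernel's rmwcc.h, and MY_RMWcc is an invented name.

/*
 * Illustrative only: a unary-style read-modify-write-and-test macro.
 * #cc stringizes the bare token, so MY_RMWcc("lock decq", v->counter, e)
 * expands to "lock decq %0; sete %1".  In the kernel, bool comes from
 * <linux/types.h>; userspace would need <stdbool.h>.
 */
#define MY_RMWcc(fullop, var, cc)					\
({									\
	bool __ret;							\
	asm volatile (fullop " %0; set" #cc " %1"			\
		      : "+m" (var), "=qm" (__ret)			\
		      : : "memory");					\
	__ret;								\
})
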
@@ -109,9 +109,9 @@ static __always_inline void atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int atomic64_dec_and_test(atomic64_t *v)
+static inline bool atomic64_dec_and_test(atomic64_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
 }
 
 /**
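With a bool return, atomic64_dec_and_test() drops straight into the usual last-reference pattern. A hypothetical user follows; struct obj, obj_put() and the payload are invented for illustration.

#include <linux/atomic.h>
#include <linux/slab.h>

/* Hypothetical object with an atomic64_t reference count. */
struct obj {
	atomic64_t refcount;
	/* ... payload ... */
};

/* Drop one reference; free the object when the last one goes away. */
static void obj_put(struct obj *o)
{
	if (atomic64_dec_and_test(&o->refcount))
		kfree(o);
}
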
@@ -122,9 +122,9 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_inc_and_test(atomic64_t *v)
+static inline bool atomic64_inc_and_test(atomic64_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
 }
 
 /**
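The other expansion that benefits from a bare condition token is the asm-goto flavour, where the token ends up in a conditional jump ("j" #cc) that branches straight to a C label instead of materialising a flag. A stand-alone sketch, illustrative only: my_inc_and_test() is not a kernel function and the jump condition is spelled out as je here.

/* Illustrative asm-goto shape: branch on ZF rather than setCC'ing it. */
static inline bool my_inc_and_test(long *counter)
{
	asm goto ("lock incq %0; je %l[hit_zero]"
		  : /* no outputs in a plain asm goto */
		  : "m" (*counter)
		  : "memory", "cc"
		  : hit_zero);
	return false;
hit_zero:
	return true;
}
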
@@ -136,9 +136,9 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int atomic64_add_negative(long i, atomic64_t *v)
+static inline bool atomic64_add_negative(long i, atomic64_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
 
 /**
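atomic64_add_negative() reports the sign of the result, which fits credit/budget style counters that are allowed to dip below zero. A hypothetical sketch; charge_bytes() is an invented name.

#include <linux/atomic.h>

/* Charge @bytes against a credit balance; true means we overdrew it. */
static bool charge_bytes(atomic64_t *credit, long bytes)
{
	return atomic64_add_negative(-bytes, credit);
}
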
@@ -158,6 +158,16 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
        return atomic64_add_return(-i, v);
 }
 
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
+{
+       return xadd(&v->counter, i);
+}
+
+static inline long atomic64_fetch_sub(long i, atomic64_t *v)
+{
+       return xadd(&v->counter, -i);
+}
+
 #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
@@ -180,7 +190,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
        long c, old;
        c = atomic64_read(v);
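The hunk is cut off before the body of the loop; the usual shape of add_unless is a read/cmpxchg retry loop along the lines of the sketch below, which shows the common pattern rather than this file's exact code. Note that with the bool conversion the function effectively reports whether the add was performed, even though the kernel-doc above still speaks of returning the old value.

/* Sketch of the usual add-unless retry loop; not this file's exact body. */
static inline bool atomic64_add_unless_sketch(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			return false;	/* forbidden value reached, skip the add */
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			return true;	/* our cmpxchg won the race */
		c = old;		/* lost the race, retry with the fresh value */
	}
}
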
@@ -229,10 +239,29 @@ static inline void atomic64_##op(long i, atomic64_t *v)                   \
                        : "memory");                                    \
 }
 
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+#define ATOMIC64_FETCH_OP(op, c_op)                                    \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v)          \
+{                                                                      \
+       long old, val = atomic64_read(v);                               \
+       for (;;) {                                                      \
+               old = atomic64_cmpxchg(v, val, val c_op i);             \
+               if (old == val)                                         \
+                       break;                                          \
+               val = old;                                              \
+       }                                                               \
+       return old;                                                     \
+}
+
+#define ATOMIC64_OPS(op, c_op)                                         \
+       ATOMIC64_OP(op)                                                 \
+       ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
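
ATOMIC64_FETCH_OP() builds atomic64_fetch_and/or/xor out of an atomic64_cmpxchg() retry loop, again returning the pre-operation value. That return convention is what bit-flag users want; a hypothetical example follows, with request_shutdown() and FLAG_SHUTDOWN invented for illustration.

#include <linux/atomic.h>

#define FLAG_SHUTDOWN	(1L << 0)	/* hypothetical flag bit */

/* Set the shutdown bit; true if this caller is the one who set it first. */
static bool request_shutdown(atomic64_t *flags)
{
	return !(atomic64_fetch_or(FLAG_SHUTDOWN, flags) & FLAG_SHUTDOWN);
}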