diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 866a71fca9a3c493f9f7c0f7e9bbb80baa49a1e8..35a67783cfa088d4166de9ff7ed6f993b899c09b 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
 #include <linux/types.h>
 
 #include <asm/barrier.h>
-#include <asm/cmpxchg.h>
-
-#define ATOMIC_INIT(i) { (i) }
+#include <asm/lse.h>
 
 #ifdef __KERNEL__
 
-/*
- * On ARM, ordinary assignment (str instruction) doesn't clear the local
- * strex/ldrex monitor on some implementations. The reason we can use it for
- * atomic_set() is the clrex or dummy strex done on every exception return.
- */
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i)        (((v)->counter) = (i))
-
-/*
- * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
- * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.
- */
-
-#define ATOMIC_OP(op, asm_op)                                          \
-static inline void atomic_##op(int i, atomic_t *v)                     \
-{                                                                      \
-       unsigned long tmp;                                              \
-       int result;                                                     \
-                                                                       \
-       asm volatile("// atomic_" #op "\n"                              \
-"1:    ldxr    %w0, %2\n"                                              \
-"      " #asm_op "     %w0, %w0, %w3\n"                                \
-"      stxr    %w1, %w0, %2\n"                                         \
-"      cbnz    %w1, 1b"                                                \
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i));                                                    \
-}                                                                      \
-
-#define ATOMIC_OP_RETURN(op, asm_op)                                   \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
-{                                                                      \
-       unsigned long tmp;                                              \
-       int result;                                                     \
-                                                                       \
-       asm volatile("// atomic_" #op "_return\n"                       \
-"1:    ldxr    %w0, %2\n"                                              \
-"      " #asm_op "     %w0, %w0, %w3\n"                                \
-"      stlxr   %w1, %w0, %2\n"                                         \
-"      cbnz    %w1, 1b"                                                \
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i)                                                      \
-       : "memory");                                                    \
-                                                                       \
-       smp_mb();                                                       \
-       return result;                                                  \
-}
+#define __ARM64_IN_ATOMIC_IMPL
 
-#define ATOMIC_OPS(op, asm_op)                                         \
-       ATOMIC_OP(op, asm_op)                                           \
-       ATOMIC_OP_RETURN(op, asm_op)
-
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
-
-#define atomic_andnot atomic_andnot
-
-ATOMIC_OP(and, and)
-ATOMIC_OP(andnot, bic)
-ATOMIC_OP(or, orr)
-ATOMIC_OP(xor, eor)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-       unsigned long tmp;
-       int oldval;
-
-       smp_mb();
-
-       asm volatile("// atomic_cmpxchg\n"
-"1:    ldxr    %w1, %2\n"
-"      cmp     %w1, %w3\n"
-"      b.ne    2f\n"
-"      stxr    %w0, %w4, %2\n"
-"      cbnz    %w0, 1b\n"
-"2:"
-       : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-       : "Ir" (old), "r" (new)
-       : "cc");
-
-       smp_mb();
-       return oldval;
-}
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
+#include <asm/atomic_lse.h>
+#else
+#include <asm/atomic_ll_sc.h>
+#endif
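
Editor's note: the conditional above picks the LSE (Large System Extensions) implementation only when both CONFIG_ARM64_LSE_ATOMICS and an LSE-capable assembler (CONFIG_AS_LSE) are available, and otherwise falls back to the original load-exclusive/store-exclusive code, which now lives in atomic_ll_sc.h. The __ARM64_IN_ATOMIC_IMPL define lets those backend headers refuse direct inclusion; a minimal sketch of that guard is shown below (the exact wording in the real atomic_ll_sc.h / atomic_lse.h may differ).

        #ifndef __ARM64_IN_ATOMIC_IMPL
        #error "please don't include this file directly"
        #endif
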
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int c, old;
+#undef __ARM64_IN_ATOMIC_IMPL
 
-       c = atomic_read(v);
-       while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
-               c = old;
-       return c;
-}
+#include <asm/cmpxchg.h>
 
-#define atomic_inc(v)          atomic_add(1, v)
-#define atomic_dec(v)          atomic_sub(1, v)
+#define ___atomic_add_unless(v, a, u, sfx)                             \
+({                                                                     \
+       typeof((v)->counter) c, old;                                    \
+                                                                       \
+       c = atomic##sfx##_read(v);                                      \
+       while (c != (u) &&                                              \
+             (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c)      \
+               c = old;                                                \
+       c;                                                              \
+ })
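
Editor's note: ___atomic_add_unless() is the classic cmpxchg retry loop, equivalent to the open-coded __atomic_add_unless() removed above: it re-reads the counter and retries the compare-and-exchange until either the value reaches the "unless" limit u or the swap of c + a succeeds, and it returns the value observed before any update. A plain-function sketch of the 32-bit expansion (function name hypothetical, for illustration only):

        static inline int example_add_unless(atomic_t *v, int a, int u)
        {
                int c = atomic_read(v);
                int old;

                /* Retry until the value equals u (give up) or the
                 * compare-and-exchange installs c + a successfully. */
                while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
                        c = old;

                return c;       /* old value; callers compare it against u */
        }
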
 
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define ATOMIC_INIT(i) { (i) }
 
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+#define atomic_read(v)                 READ_ONCE((v)->counter)
+#define atomic_set(v, i)               (((v)->counter) = (i))
+#define atomic_xchg(v, new)            xchg(&((v)->counter), (new))
+#define atomic_cmpxchg(v, old, new)    cmpxchg(&((v)->counter), (old), (new))
+
+#define atomic_inc(v)                  atomic_add(1, (v))
+#define atomic_dec(v)                  atomic_sub(1, (v))
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+#define atomic_inc_and_test(v)         (atomic_inc_return(v) == 0)
+#define atomic_dec_and_test(v)         (atomic_dec_return(v) == 0)
+#define atomic_sub_and_test(i, v)      (atomic_sub_return((i), (v)) == 0)
+#define atomic_add_negative(i, v)      (atomic_add_return((i), (v)) < 0)
+#define __atomic_add_unless(v, a, u)   ___atomic_add_unless(v, a, u,)
+#define atomic_andnot                  atomic_andnot
 
 /*
  * 64-bit atomic operations.
  */
-#define ATOMIC64_INIT(i) { (i) }
-
-#define atomic64_read(v)       ACCESS_ONCE((v)->counter)
-#define atomic64_set(v,i)      (((v)->counter) = (i))
-
-#define ATOMIC64_OP(op, asm_op)                                                \
-static inline void atomic64_##op(long i, atomic64_t *v)                        \
-{                                                                      \
-       long result;                                                    \
-       unsigned long tmp;                                              \
-                                                                       \
-       asm volatile("// atomic64_" #op "\n"                            \
-"1:    ldxr    %0, %2\n"                                               \
-"      " #asm_op "     %0, %0, %3\n"                                   \
-"      stxr    %w1, %0, %2\n"                                          \
-"      cbnz    %w1, 1b"                                                \
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i));                                                    \
-}                                                                      \
-
-#define ATOMIC64_OP_RETURN(op, asm_op)                                 \
-static inline long atomic64_##op##_return(long i, atomic64_t *v)       \
-{                                                                      \
-       long result;                                                    \
-       unsigned long tmp;                                              \
-                                                                       \
-       asm volatile("// atomic64_" #op "_return\n"                     \
-"1:    ldxr    %0, %2\n"                                               \
-"      " #asm_op "     %0, %0, %3\n"                                   \
-"      stlxr   %w1, %0, %2\n"                                          \
-"      cbnz    %w1, 1b"                                                \
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i)                                                      \
-       : "memory");                                                    \
-                                                                       \
-       smp_mb();                                                       \
-       return result;                                                  \
-}
-
-#define ATOMIC64_OPS(op, asm_op)                                       \
-       ATOMIC64_OP(op, asm_op)                                         \
-       ATOMIC64_OP_RETURN(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
-
-#define atomic64_andnot atomic64_andnot
-
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(andnot, bic)
-ATOMIC64_OP(or, orr)
-ATOMIC64_OP(xor, eor)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
-{
-       long oldval;
-       unsigned long res;
-
-       smp_mb();
-
-       asm volatile("// atomic64_cmpxchg\n"
-"1:    ldxr    %1, %2\n"
-"      cmp     %1, %3\n"
-"      b.ne    2f\n"
-"      stxr    %w0, %4, %2\n"
-"      cbnz    %w0, 1b\n"
-"2:"
-       : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-       : "Ir" (old), "r" (new)
-       : "cc");
-
-       smp_mb();
-       return oldval;
-}
-
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
-       long result;
-       unsigned long tmp;
-
-       asm volatile("// atomic64_dec_if_positive\n"
-"1:    ldxr    %0, %2\n"
-"      subs    %0, %0, #1\n"
-"      b.mi    2f\n"
-"      stlxr   %w1, %0, %2\n"
-"      cbnz    %w1, 1b\n"
-"      dmb     ish\n"
-"2:"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       :
-       : "cc", "memory");
-
-       return result;
-}
-
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-       long c, old;
-
-       c = atomic64_read(v);
-       while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
-               c = old;
-
-       return c != u;
-}
-
-#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)                        atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)         atomic64_add_return(1LL, (v))
+#define ATOMIC64_INIT                  ATOMIC_INIT
+#define atomic64_read                  atomic_read
+#define atomic64_set                   atomic_set
+#define atomic64_xchg                  atomic_xchg
+#define atomic64_cmpxchg               atomic_cmpxchg
+
+#define atomic64_inc(v)                        atomic64_add(1, (v))
+#define atomic64_dec(v)                        atomic64_sub(1, (v))
+#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
+#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
 #define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)                        atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_dec_and_test(v)       (atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i, v)    (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_add_negative(i, v)    (atomic64_add_return((i), (v)) < 0)
+#define atomic64_add_unless(v, a, u)   (___atomic_add_unless(v, a, u, 64) != u)
+#define atomic64_andnot                        atomic64_andnot
+
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
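
Editor's note: atomic64_add_unless() compares the helper's return value against u, so it yields non-zero exactly when the addition was performed; atomic64_inc_not_zero() builds on that for the usual "take a reference unless the object is already dead" idiom. A hypothetical usage sketch (the struct and function names are illustrative, not from the kernel):

        struct example_obj {
                atomic64_t refcount;
        };

        /* Bump the reference count only if it is still non-zero;
         * returns non-zero on success, 0 if the count already hit zero. */
        static inline int example_obj_tryget(struct example_obj *obj)
        {
                return atomic64_inc_not_zero(&obj->refcount);
        }
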
 
 #endif
 #endif