arm64: atomics: patch in lse instructions when supported by the CPU

diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 7047051ded40e3e9c3cc944fe0ce456bb6eaf897..836226d5e12cd6d56a6192200747e7f9ae1a6798 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
 
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
+#include <asm/lse.h>
 
 #define ATOMIC_INIT(i) { (i) }
 
 #ifdef __KERNEL__
 
+#define __ARM64_IN_ATOMIC_IMPL
+
+#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
+#include <asm/atomic_lse.h>
+#else
+#include <asm/atomic_ll_sc.h>
+#endif
+
+#undef __ARM64_IN_ATOMIC_IMPL
+
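With this change atomic.h no longer carries an implementation of its own: the LL/SC code removed below moves to atomic_ll_sc.h, and atomic_lse.h supplies LSE variants that get patched in at boot on CPUs that support them. A minimal sketch of the patching glue, assuming the ARM64_LSE_ATOMIC_INSN() helper that asm/lse.h provides in this series:

/* Sketch only, assuming CONFIG_ARM64_LSE_ATOMICS && CONFIG_AS_LSE: each op
 * emits the LL/SC sequence as its default text, and the alternatives
 * framework rewrites it to the LSE form at boot on CPUs that report the
 * ARM64_HAS_LSE_ATOMICS capability. */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
	ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)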
 /*
  * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)        (((v)->counter) = (i))
 
-/*
- * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
- * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.
- */
-
-#define ATOMIC_OP(op, asm_op)                                          \
-static inline void atomic_##op(int i, atomic_t *v)                     \
-{                                                                      \
-       unsigned long tmp;                                              \
-       int result;                                                     \
-                                                                       \
-       asm volatile("// atomic_" #op "\n"                              \
-"1:    ldxr    %w0, %2\n"                                              \
-"      " #asm_op "     %w0, %w0, %w3\n"                                \
-"      stxr    %w1, %w0, %2\n"                                         \
-"      cbnz    %w1, 1b"                                                \
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i));                                                    \
-}                                                                      \
-
-#define ATOMIC_OP_RETURN(op, asm_op)                                   \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
-{                                                                      \
-       unsigned long tmp;                                              \
-       int result;                                                     \
-                                                                       \
-       asm volatile("// atomic_" #op "_return\n"                       \
-"1:    ldxr    %w0, %2\n"                                              \
-"      " #asm_op "     %w0, %w0, %w3\n"                                \
-"      stlxr   %w1, %w0, %2\n"                                         \
-"      cbnz    %w1, 1b"                                                \
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i)                                                      \
-       : "memory");                                                    \
-                                                                       \
-       smp_mb();                                                       \
-       return result;                                                  \
-}
-
-#define ATOMIC_OPS(op, asm_op)                                         \
-       ATOMIC_OP(op, asm_op)                                           \
-       ATOMIC_OP_RETURN(op, asm_op)
-
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-       unsigned long tmp;
-       int oldval;
-
-       smp_mb();
-
-       asm volatile("// atomic_cmpxchg\n"
-"1:    ldxr    %w1, %2\n"
-"      cmp     %w1, %w3\n"
-"      b.ne    2f\n"
-"      stxr    %w0, %w4, %2\n"
-"      cbnz    %w0, 1b\n"
-"2:"
-       : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-       : "Ir" (old), "r" (new)
-       : "cc");
-
-       smp_mb();
-       return oldval;
-}
-
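The ldxr/stxr retry loops removed above live on unchanged in atomic_ll_sc.h; the point of the split is that the LSE flavour of most ops collapses to a single instruction. A rough, illustrative sketch (not the exact atomic_lse.h code) of what atomic_add() becomes with the ARMv8.1 STADD instruction, assuming the assembler accepts LSE:

/* Illustrative only: one STADD replaces the whole ldxr/add/stxr/cbnz
 * retry loop of the LL/SC atomic_add().  The real atomic_lse.h wraps
 * this in the runtime-patching machinery shown earlier. */
static inline void atomic_add_lse_sketch(int i, atomic_t *v)
{
	asm volatile("	stadd	%w[i], %[v]"
	: [v] "+Q" (v->counter)
	: [i] "r" (i));
}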
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -134,6 +72,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
+#define atomic_andnot atomic_andnot
+
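Defining atomic_andnot to itself tells the generic atomic code that arm64 now supplies its own andnot operation (generated by the implementation headers), so the common fallback is compiled out; the atomic64_andnot define at the end of the file does the same for the 64-bit op. A paraphrased sketch of the fallback this suppresses, as found in include/linux/atomic.h:

/* Sketch of the generic fallback: only emitted when the architecture
 * did not advertise an atomic_andnot() of its own. */
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif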
 /*
  * 64-bit atomic operations.
  */
@@ -142,95 +82,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic64_read(v)       ACCESS_ONCE((v)->counter)
 #define atomic64_set(v,i)      (((v)->counter) = (i))
 
-#define ATOMIC64_OP(op, asm_op)                                                \
-static inline void atomic64_##op(long i, atomic64_t *v)                        \
-{                                                                      \
-       long result;                                                    \
-       unsigned long tmp;                                              \
-                                                                       \
-       asm volatile("// atomic64_" #op "\n"                            \
-"1:    ldxr    %0, %2\n"                                               \
-"      " #asm_op "     %0, %0, %3\n"                                   \
-"      stxr    %w1, %0, %2\n"                                          \
-"      cbnz    %w1, 1b"                                                \
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i));                                                    \
-}                                                                      \
-
-#define ATOMIC64_OP_RETURN(op, asm_op)                                 \
-static inline long atomic64_##op##_return(long i, atomic64_t *v)       \
-{                                                                      \
-       long result;                                                    \
-       unsigned long tmp;                                              \
-                                                                       \
-       asm volatile("// atomic64_" #op "_return\n"                     \
-"1:    ldxr    %0, %2\n"                                               \
-"      " #asm_op "     %0, %0, %3\n"                                   \
-"      stlxr   %w1, %0, %2\n"                                          \
-"      cbnz    %w1, 1b"                                                \
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i)                                                      \
-       : "memory");                                                    \
-                                                                       \
-       smp_mb();                                                       \
-       return result;                                                  \
-}
-
-#define ATOMIC64_OPS(op, asm_op)                                       \
-       ATOMIC64_OP(op, asm_op)                                         \
-       ATOMIC64_OP_RETURN(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
-{
-       long oldval;
-       unsigned long res;
-
-       smp_mb();
-
-       asm volatile("// atomic64_cmpxchg\n"
-"1:    ldxr    %1, %2\n"
-"      cmp     %1, %3\n"
-"      b.ne    2f\n"
-"      stxr    %w0, %4, %2\n"
-"      cbnz    %w0, 1b\n"
-"2:"
-       : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-       : "Ir" (old), "r" (new)
-       : "cc");
-
-       smp_mb();
-       return oldval;
-}
-
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
-       long result;
-       unsigned long tmp;
-
-       asm volatile("// atomic64_dec_if_positive\n"
-"1:    ldxr    %0, %2\n"
-"      subs    %0, %0, #1\n"
-"      b.mi    2f\n"
-"      stlxr   %w1, %0, %2\n"
-"      cbnz    %w1, 1b\n"
-"      dmb     ish\n"
-"2:"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       :
-       : "cc", "memory");
-
-       return result;
-}
-
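atomic64_dec_if_positive(), now provided by the implementation headers as well, only stores the decremented value when it does not go negative and returns the new value either way. A hypothetical caller, just to show the contract:

/* Hypothetical usage sketch: drop a reference only while the count is
 * still positive; a negative return means it was already zero and the
 * counter was left untouched. */
static inline bool put_ref_if_held_sketch(atomic64_t *refs)
{
	return atomic64_dec_if_positive(refs) >= 0;
}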
 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
        long c, old;
@@ -252,5 +105,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
 
+#define atomic64_andnot atomic64_andnot
+
 #endif
 #endif