diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index b3b5c4ae3800b061d5ad2fdd911aea677feedc32..74d0b8eb0799cb6635b999f7afb1c7e7c0a361bf 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -55,40 +55,47 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))                             \
 }                                                                      \
 __LL_SC_EXPORT(atomic_##op);
 
-#define ATOMIC_OP_RETURN(op, asm_op)                                   \
+#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)           \
 __LL_SC_INLINE int                                                     \
-__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v))               \
+__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))         \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
-       asm volatile("// atomic_" #op "_return\n"                       \
+       asm volatile("// atomic_" #op "_return" #name "\n"              \
 "      prfm    pstl1strm, %2\n"                                        \
-"1:    ldxr    %w0, %2\n"                                              \
+"1:    ld" #acq "xr    %w0, %2\n"                                      \
 "      " #asm_op "     %w0, %w0, %w3\n"                                \
-"      stlxr   %w1, %w0, %2\n"                                         \
-"      cbnz    %w1, 1b"                                                \
+"      st" #rel "xr    %w1, %w0, %2\n"                                 \
+"      cbnz    %w1, 1b\n"                                              \
+"      " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
-       : "memory");                                                    \
+       : cl);                                                          \
                                                                        \
-       smp_mb();                                                       \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic_##op##_return);
+__LL_SC_EXPORT(atomic_##op##_return##name);
+
+#define ATOMIC_OPS(...)                                                        \
+       ATOMIC_OP(__VA_ARGS__)                                          \
+       ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)
 
-#define ATOMIC_OPS(op, asm_op)                                         \
-       ATOMIC_OP(op, asm_op)                                           \
-       ATOMIC_OP_RETURN(op, asm_op)
+#define ATOMIC_OPS_RLX(...)                                            \
+       ATOMIC_OPS(__VA_ARGS__)                                         \
+       ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
+       ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
+       ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)
 
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
+ATOMIC_OPS_RLX(add, add)
+ATOMIC_OPS_RLX(sub, sub)
 
 ATOMIC_OP(and, and)
 ATOMIC_OP(andnot, bic)
 ATOMIC_OP(or, orr)
 ATOMIC_OP(xor, eor)
 
+#undef ATOMIC_OPS_RLX
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
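Note: ATOMIC_OP_RETURN now takes the variant suffix, the trailing barrier, the acquire/release qualifiers for the exclusive pair, and the clobber list as parameters, so a single template emits atomic_<op>_return plus the new _relaxed/_acquire/_release forms. As a rough stand-alone illustration (hypothetical function name, not the kernel's literal macro expansion), the _acquire flavour of a 32-bit add reduces to:

/* Sketch of what ATOMIC_OP_RETURN(_acquire, , a, , "memory", add, add)
 * boils down to; compiles stand-alone on AArch64 with GCC/Clang. */
static inline int my_atomic_add_return_acquire(int i, int *counter)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return_acquire\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"	/* load-acquire exclusive */
	"	add	%w0, %w0, %w3\n"
	"	stxr	%w1, %w0, %2\n"	/* plain store-exclusive, no release */
	"	cbnz	%w1, 1b"	/* retry if the exclusive monitor was lost */
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
	: "Ir" (i)
	: "memory");

	return result;	/* no trailing barrier for the acquire variant */
}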
@@ -111,40 +118,47 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))                      \
 }                                                                      \
 __LL_SC_EXPORT(atomic64_##op);
 
-#define ATOMIC64_OP_RETURN(op, asm_op)                                 \
+#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)         \
 __LL_SC_INLINE long                                                    \
-__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v))          \
+__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))    \
 {                                                                      \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
-       asm volatile("// atomic64_" #op "_return\n"                     \
+       asm volatile("// atomic64_" #op "_return" #name "\n"            \
 "      prfm    pstl1strm, %2\n"                                        \
-"1:    ldxr    %0, %2\n"                                               \
+"1:    ld" #acq "xr    %0, %2\n"                                       \
 "      " #asm_op "     %0, %0, %3\n"                                   \
-"      stlxr   %w1, %0, %2\n"                                          \
-"      cbnz    %w1, 1b"                                                \
+"      st" #rel "xr    %w1, %0, %2\n"                                  \
+"      cbnz    %w1, 1b\n"                                              \
+"      " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
-       : "memory");                                                    \
+       : cl);                                                          \
                                                                        \
-       smp_mb();                                                       \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic64_##op##_return);
+__LL_SC_EXPORT(atomic64_##op##_return##name);
+
+#define ATOMIC64_OPS(...)                                              \
+       ATOMIC64_OP(__VA_ARGS__)                                        \
+       ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)
 
-#define ATOMIC64_OPS(op, asm_op)                                       \
-       ATOMIC64_OP(op, asm_op)                                         \
-       ATOMIC64_OP_RETURN(op, asm_op)
+#define ATOMIC64_OPS_RLX(...)                                          \
+       ATOMIC64_OPS(__VA_ARGS__)                                       \
+       ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)      \
+       ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)      \
+       ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)
 
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
+ATOMIC64_OPS_RLX(add, add)
+ATOMIC64_OPS_RLX(sub, sub)
 
 ATOMIC64_OP(and, and)
 ATOMIC64_OP(andnot, bic)
 ATOMIC64_OP(or, orr)
 ATOMIC64_OP(xor, eor)
 
+#undef ATOMIC64_OPS_RLX
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
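The 64-bit hunk is the same transformation with x-registers (%0/%3) in place of the %w forms. For reference, a minimal sketch of the fully ordered variant, roughly what ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", add, add) expands to (illustrative name, not the kernel's exported symbol): the store-exclusive gains release semantics and the barrier moves into the asm, which is why the C-level smp_mb() after the block disappears.

/* Sketch of the fully ordered 64-bit add-return; stand-alone AArch64 C. */
static inline long my_atomic64_add_return(long i, long *counter)
{
	unsigned long tmp;
	long result;

	asm volatile("// atomic64_add_return\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"	/* plain load-exclusive */
	"	add	%0, %0, %3\n"
	"	stlxr	%w1, %0, %2\n"	/* store-release exclusive */
	"	cbnz	%w1, 1b\n"
	"	dmb	ish"		/* full barrier folded into the asm */
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
	: "Ir" (i)
	: "memory");

	return result;
}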
@@ -172,7 +186,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 }
 __LL_SC_EXPORT(atomic64_dec_if_positive);
 
-#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl)                       \
+#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)                  \
 __LL_SC_INLINE unsigned long                                           \
 __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,               \
                                     unsigned long old,                 \
@@ -182,7 +196,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,            \
                                                                        \
        asm volatile(                                                   \
        "       prfm    pstl1strm, %[v]\n"                              \
-       "1:     ldxr" #sz "\t%" #w "[oldval], %[v]\n"                   \
+       "1:     ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"           \
        "       eor     %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"  \
        "       cbnz    %" #w "[tmp], 2f\n"                             \
        "       st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"     \
@@ -199,14 +213,22 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,          \
 }                                                                      \
 __LL_SC_EXPORT(__cmpxchg_case_##name);
 
-__CMPXCHG_CASE(w, b,    1,        ,  ,         )
-__CMPXCHG_CASE(w, h,    2,        ,  ,         )
-__CMPXCHG_CASE(w,  ,    4,        ,  ,         )
-__CMPXCHG_CASE( ,  ,    8,        ,  ,         )
-__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
-__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
-__CMPXCHG_CASE(w,  , mb_4, dmb ish, l, "memory")
-__CMPXCHG_CASE( ,  , mb_8, dmb ish, l, "memory")
+__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
+__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
+__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
+__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
+__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
+__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
+__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
+__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
+__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
+__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
+__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
+__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
+__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
+__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
+__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
+__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")
 
 #undef __CMPXCHG_CASE
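__CMPXCHG_CASE grows an acq parameter alongside rel, so one template now covers the unordered, acquire, release and fully ordered cases for each access size. A hedged stand-alone sketch of the new acq_4 flavour (hypothetical name and plain "r" constraints, not the macro's literal output):

/* Roughly what __CMPXCHG_CASE(w,  , acq_4, , a, , "memory") generates. */
static inline unsigned int my_cmpxchg_acq_32(volatile unsigned int *ptr,
					     unsigned int old,
					     unsigned int newval)
{
	unsigned int oldval;
	unsigned long tmp;

	asm volatile(
	"	prfm	pstl1strm, %[v]\n"
	"1:	ldaxr	%w[oldval], %[v]\n"	/* load-acquire exclusive */
	"	eor	%w[tmp], %w[oldval], %w[old]\n"
	"	cbnz	%w[tmp], 2f\n"		/* bail out on mismatch */
	"	stxr	%w[tmp], %w[newval], %[v]\n"
	"	cbnz	%w[tmp], 1b\n"		/* retry if the store-exclusive failed */
	"2:"
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), [v] "+Q" (*ptr)
	: [old] "r" (old), [newval] "r" (newval)
	: "memory");

	return oldval;	/* caller compares against old to detect failure */
}

The mb_* instantiations keep dmb ish together with the store-release, matching the fully ordered atomic_*_return variants above.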