/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */
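
/*
 * As a rough sketch of what the macros below expand to: ATOMIC_OP(add, add)
 * generates an atomic_add(i, v) whose body is the loop
 *
 * 1:	ldxr	w0, [v]		// load-exclusive v->counter
 *	add	w0, w0, w2	// apply the operation (w2 holds i)
 *	stxr	w1, w0, [v]	// store-exclusive, w1 = status
 *	cbnz	w1, 1b		// non-zero status means the exclusive
 *				// monitor was lost, so retry
 *
 * preceded by a prfm pstl1strm to prefetch the line for store. A racing
 * write between the ldxr and the stxr simply forces another trip round
 * the loop, which is what makes the read-modify-write atomic.
 */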

#define ATOMIC_OP(op, asm_op)                                           \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))                         \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "\n"                              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %w0, %2\n"                                              \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       stxr    %w1, %w0, %2\n"                                         \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(atomic_##op);
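
/*
 * A note on the asm constraints used above and in the variants below:
 * "=&r" marks result and tmp as early-clobber outputs (so they cannot
 * share a register with the inputs), "+Q" makes v->counter a read-write
 * memory operand addressed through a single base register, and "Ir"
 * lets the compiler pass i either as an add/sub-class immediate or in
 * a register.
 */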

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)            \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))          \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "_return" #name "\n"              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %w0, %2\n"                                      \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       st" #rel "xr    %w1, %w0, %2\n"                                 \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic_##op##_return##name);

#define ATOMIC_OPS(...)                                                 \
        ATOMIC_OP(__VA_ARGS__)                                          \
        ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)

#define ATOMIC_OPS_RLX(...)                                             \
        ATOMIC_OPS(__VA_ARGS__)                                         \
        ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS_RLX(add, add)
ATOMIC_OPS_RLX(sub, sub)
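
/*
 * For each of add and sub, the instantiations above generate the void
 * form (e.g. atomic_add) plus four value-returning forms:
 * atomic_add_return (fully ordered via stlxr plus a trailing dmb ish),
 * atomic_add_return_relaxed (no ordering), atomic_add_return_acquire
 * (ldaxr) and atomic_add_return_release (stlxr).
 */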

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)
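
/*
 * The bitwise operations only get the void forms: this header provides
 * no value-returning variants of and, andnot, or and xor.
 */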

#undef ATOMIC_OPS_RLX
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op)                                         \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))                    \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %0, %2\n"                                               \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       stxr    %w1, %0, %2\n"                                          \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)          \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))     \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "_return" #name "\n"            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %0, %2\n"                                       \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       st" #rel "xr    %w1, %0, %2\n"                                  \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic64_##op##_return##name);

#define ATOMIC64_OPS(...)                                               \
        ATOMIC64_OP(__VA_ARGS__)                                        \
        ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)

#define ATOMIC64_OPS_RLX(...)                                           \
        ATOMIC64_OPS(__VA_ARGS__)                                       \
        ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS_RLX(add, add)
ATOMIC64_OPS_RLX(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS_RLX
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
"       prfm    pstl1strm, %2\n"
"1:     ldxr    %0, %2\n"
"       subs    %0, %0, #1\n"
"       b.lt    2f\n"
"       stlxr   %w1, %0, %2\n"
"       cbnz    %w1, 1b\n"
"       dmb     ish\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
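
/*
 * atomic64_dec_if_positive() only stores the decremented value while it
 * is still non-negative: the subs/b.lt pair skips the stlxr once the
 * result goes negative. The return value is the decremented counter, so
 * a negative result tells the caller that no update took place, e.g.
 *
 *	if (atomic64_dec_if_positive(&v) < 0)
 *		// v was already <= 0 and is unchanged
 */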

#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)                   \
__LL_SC_INLINE unsigned long                                            \
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,                \
                                     unsigned long old,                 \
                                     unsigned long new))                \
{                                                                       \
        unsigned long tmp, oldval;                                      \
                                                                        \
        asm volatile(                                                   \
        "       prfm    pstl1strm, %[v]\n"                              \
        "1:     ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"           \
        "       eor     %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"  \
        "       cbnz    %" #w "[tmp], 2f\n"                             \
        "       st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"     \
        "       cbnz    %w[tmp], 1b\n"                                  \
        "       " #mb "\n"                                              \
        "       mov     %" #w "[oldval], %" #w "[old]\n"                \
        "2:"                                                            \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),                   \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [old] "Lr" (old), [new] "r" (new)                             \
        : cl);                                                          \
                                                                        \
        return oldval;                                                  \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_case_##name);

__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")
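
/*
 * Naming scheme for the instantiations above: __cmpxchg_case_<order><size>,
 * where the trailing 1/2/4/8 is the access size in bytes (selecting
 * ldxrb/ldxrh/ldxr via the sz and w parameters) and the acq_/rel_/mb_
 * prefixes mirror the acquire/release/fully-ordered atomic_*_return
 * variants. On success, the final mov returns old itself, so callers can
 * compare the return value against old; on failure, the observed value
 * is returned instead. The "Lr" constraint additionally permits
 * immediates valid for logical instructions, matching the eor used for
 * the comparison.
 */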

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name, mb, rel, cl)                                \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,               \
                                      unsigned long old2,               \
                                      unsigned long new1,               \
                                      unsigned long new2,               \
                                      volatile void *ptr))              \
{                                                                       \
        unsigned long tmp, ret;                                         \
                                                                        \
        asm volatile("// __cmpxchg_double" #name "\n"                   \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ldxp    %0, %1, %2\n"                                   \
        "       eor     %0, %0, %3\n"                                   \
        "       eor     %1, %1, %4\n"                                   \
        "       orr     %1, %0, %1\n"                                   \
        "       cbnz    %1, 2f\n"                                       \
        "       st" #rel "xp    %w0, %5, %6, %2\n"                      \
        "       cbnz    %w0, 1b\n"                                      \
        "       " #mb "\n"                                              \
        "2:"                                                            \
        : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
        : cl);                                                          \
                                                                        \
        return ret;                                                     \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_double##name);

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
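
/*
 * __cmpxchg_double() compares and swaps a pair of adjacent 64-bit words
 * at ptr using ldxp/stxp. The eor/orr sequence folds both comparisons
 * into a single value, so the function returns zero when the exchange
 * succeeded and non-zero when either word differed from its expected
 * value.
 */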

#undef __CMPXCHG_DBL

#endif  /* __ASM_ATOMIC_LL_SC_H */