/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

#define ATOMIC_OP(op, asm_op)                                           \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))                         \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "\n"                              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %w0, %2\n"                                              \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       stxr    %w1, %w0, %2\n"                                         \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(atomic_##op);
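
/*
 * For illustration only: ATOMIC_OP(add, add) above expands to roughly
 *
 *      __LL_SC_INLINE void
 *      __LL_SC_PREFIX(atomic_add(int i, atomic_t *v))
 *      {
 *              ...
 *              asm volatile("// atomic_add\n"
 *      "       prfm    pstl1strm, %2\n"        // prefetch the line for store
 *      "1:     ldxr    %w0, %2\n"              // load-exclusive v->counter
 *      "       add     %w0, %w0, %w3\n"        // apply the operation
 *      "       stxr    %w1, %w0, %2\n"         // store-exclusive; %w1 != 0 on failure
 *      "       cbnz    %w1, 1b"                // retry if the exclusive store failed
 *              ...
 *      }
 *      __LL_SC_EXPORT(atomic_add);
 */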

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)            \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))          \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "_return" #name "\n"              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %w0, %2\n"                                      \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       st" #rel "xr    %w1, %w0, %2\n"                                 \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic_##op##_return##name);

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)             \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))             \
{                                                                       \
        unsigned long tmp;                                              \
        int val, result;                                                \
                                                                        \
        asm volatile("// atomic_fetch_" #op #name "\n"                  \
"       prfm    pstl1strm, %3\n"                                        \
"1:     ld" #acq "xr    %w0, %3\n"                                      \
"       " #asm_op "     %w1, %w0, %w4\n"                                \
"       st" #rel "xr    %w2, %w1, %3\n"                                 \
"       cbnz    %w2, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic_fetch_##op##name);

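/*
 * The name/mb/acq/rel/cl parameters select the memory ordering of each
 * generated variant.  A sketch of the mapping encoded in the table below:
 *
 *   ""       : ldxr  + stlxr + dmb ish   (fully ordered)
 *   _relaxed : ldxr  + stxr              (no ordering)
 *   _acquire : ldaxr + stxr              (acquire on the load)
 *   _release : ldxr  + stlxr             (release on the store)
 */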
#define ATOMIC_OPS(...)                                                 \
        ATOMIC_OP(__VA_ARGS__)                                          \
        ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

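/*
 * As an example, ATOMIC_OPS(add, add) below is expected to generate
 * atomic_add(), atomic_add_return{,_relaxed,_acquire,_release}() and
 * atomic_fetch_add{,_relaxed,_acquire,_release}(); ATOMIC_OPS(sub, sub)
 * generates the matching sub family.
 */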
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

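/*
 * The bitwise ops below (and, andnot, or, xor) only need the plain and
 * fetch forms, so ATOMIC_OPS is redefined without ATOMIC_OP_RETURN.
 */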
#undef ATOMIC_OPS
#define ATOMIC_OPS(...)                                                 \
        ATOMIC_OP(__VA_ARGS__)                                          \
        ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, orr)
ATOMIC_OPS(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

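/*
 * The atomic64_t implementations below mirror the 32-bit macros above,
 * but operate on the full 64-bit X registers (%0 rather than %w0); the
 * store-exclusive status flag is still a W register.
 */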
#define ATOMIC64_OP(op, asm_op)                                         \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))                    \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %0, %2\n"                                               \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       stxr    %w1, %0, %2\n"                                          \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)          \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))     \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "_return" #name "\n"            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %0, %2\n"                                       \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       st" #rel "xr    %w1, %0, %2\n"                                  \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic64_##op##_return##name);

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)           \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))        \
{                                                                       \
        long result, val;                                               \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_fetch_" #op #name "\n"                \
"       prfm    pstl1strm, %3\n"                                        \
"1:     ld" #acq "xr    %0, %3\n"                                       \
"       " #asm_op "     %1, %0, %4\n"                                   \
"       st" #rel "xr    %w2, %1, %3\n"                                  \
"       cbnz    %w2, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic64_fetch_##op##name);

#define ATOMIC64_OPS(...)                                               \
        ATOMIC64_OP(__VA_ARGS__)                                        \
        ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)                                               \
        ATOMIC64_OP(__VA_ARGS__)                                        \
        ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(andnot, bic)
ATOMIC64_OPS(or, orr)
ATOMIC64_OPS(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
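/*
 * atomic64_dec_if_positive(): decrement v->counter only when the result
 * stays non-negative, and return the new value.  If the old value was
 * already <= 0, nothing is stored and the (negative) would-be result is
 * returned.  Note that the dmb ish orders only the successful path.
 */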
__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
"       prfm    pstl1strm, %2\n"
"1:     ldxr    %0, %2\n"
"       subs    %0, %0, #1\n"
"       b.lt    2f\n"
"       stlxr   %w1, %0, %2\n"
"       cbnz    %w1, 1b\n"
"       dmb     ish\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);

#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)                   \
__LL_SC_INLINE unsigned long                                            \
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,                \
                                     unsigned long old,                 \
                                     unsigned long new))                \
{                                                                       \
        unsigned long tmp, oldval;                                      \
                                                                        \
        asm volatile(                                                   \
        "       prfm    pstl1strm, %[v]\n"                              \
        "1:     ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"           \
        "       eor     %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"  \
        "       cbnz    %" #w "[tmp], 2f\n"                             \
        "       st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"     \
        "       cbnz    %w[tmp], 1b\n"                                  \
        "       " #mb "\n"                                              \
        "       mov     %" #w "[oldval], %" #w "[old]\n"                \
        "2:"                                                            \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),                   \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [old] "Lr" (old), [new] "r" (new)                             \
        : cl);                                                          \
                                                                        \
        return oldval;                                                  \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_case_##name);

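/*
 * The instantiations below name each helper __cmpxchg_case_<name>, where
 * <name> encodes the ordering prefix (none, acq_, rel_ or mb_) and the
 * operand size in bytes (1, 2, 4 or 8); the w/sz arguments select W vs X
 * registers and the byte/halfword forms of the exclusives (e.g. ldxrb).
 */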
__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")

#undef __CMPXCHG_CASE

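/*
 * __cmpxchg_double compares and swaps a pair of adjacent 64-bit words
 * using ldxp/stxp.  It returns 0 on success and non-zero when either old
 * value failed to match; the _mb variant below adds release semantics on
 * the store plus a trailing dmb ish.
 */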
#define __CMPXCHG_DBL(name, mb, rel, cl)                                \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,               \
                                      unsigned long old2,               \
                                      unsigned long new1,               \
                                      unsigned long new2,               \
                                      volatile void *ptr))              \
{                                                                       \
        unsigned long tmp, ret;                                         \
                                                                        \
        asm volatile("// __cmpxchg_double" #name "\n"                   \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ldxp    %0, %1, %2\n"                                   \
        "       eor     %0, %0, %3\n"                                   \
        "       eor     %1, %1, %4\n"                                   \
        "       orr     %1, %0, %1\n"                                   \
        "       cbnz    %1, 2f\n"                                       \
        "       st" #rel "xp    %w0, %5, %6, %2\n"                      \
        "       cbnz    %w0, 1b\n"                                      \
        "       " #mb "\n"                                              \
        "2:"                                                            \
        : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
        : cl);                                                          \
                                                                        \
        return ret;                                                     \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_double##name);

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL

#endif  /* __ASM_ATOMIC_LL_SC_H */