/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif
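
/*
 * Overview (a sketch, assuming the usual alternatives-based patching
 * behind ARM64_LSE_ATOMIC_INSN and __LL_SC_CALL, which are defined
 * elsewhere): each routine below emits a branch to an out-of-line
 * LL/SC implementation by default; on CPUs with the ARMv8.1 LSE
 * atomics, that branch is patched at boot into the native LSE
 * instruction(s) given as the second argument.  Both alternatives must
 * occupy the same number of instructions, which is why the LL/SC side
 * of several routines is padded with nops.
 */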

#define __LL_SC_ATOMIC(op)      __LL_SC_CALL(atomic_##op)
#define ATOMIC_OP(op, asm_op)                                           \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),          \
"       " #asm_op "     %w[i], %[v]\n")                                 \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS);                                            \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)
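
/*
 * Illustrative expansion (a sketch, not part of the original source):
 * ATOMIC_OP(add, stadd) above defines roughly
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              // LL/SC default: bl <out-of-line atomic_add>
 *              // LSE:           stadd %w[i], %[v]
 *      }
 *
 * i.e. a fire-and-forget atomic add with no return value and no
 * ordering guarantees beyond atomicity.
 */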

#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)                    \
static inline int atomic_fetch_##op##name(int i, atomic_t *v)           \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(fetch_##op##name),                               \
        /* LSE atomics */                                               \
"       " #asm_op #mb " %w[i], %w[i], %[v]")                            \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

#define ATOMIC_FETCH_OPS(op, asm_op)                                    \
        ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)                       \
        ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")             \
        ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")             \
        ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)
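
/*
 * Naming/ordering convention used throughout this file (a summary, not
 * new behaviour): the "mb" argument selects the LSE ordering suffix --
 * "" for relaxed, "a" for acquire (e.g. ldadda), "l" for release
 * (e.g. ldaddl) and "al" for fully ordered (e.g. ldaddal) -- and the
 * non-relaxed variants additionally list "memory" in the clobbers so
 * the compiler treats them as barriers.
 */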

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                           \
static inline int atomic_add_return##name(int i, atomic_t *v)           \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
        __LL_SC_ATOMIC(add_return##name),                               \
        /* LSE atomics */                                               \
        "       ldadd" #mb "    %w[i], w30, %[v]\n"                     \
        "       add     %w[i], %w[i], w30")                             \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")
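
/*
 * Note (explanatory, based on the constraints above): the LSE sequence
 * needs a scratch register for the fetched value, and w30 (the link
 * register) is convenient here because the out-of-line LL/SC call
 * already clobbers it (it is expected to be in __LL_SC_CLOBBERS).
 * ldadd returns the old value in w30 and the final add produces the
 * new value for return; the extra nop on the LL/SC side keeps both
 * alternatives the same length.
 */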

#undef ATOMIC_OP_ADD_RETURN

static inline void atomic_and(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        "       nop\n"
        __LL_SC_ATOMIC(and),
        /* LSE atomics */
        "       mvn     %w[i], %w[i]\n"
        "       stclr   %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}
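
/*
 * LSE has no atomic AND instruction, so AND is expressed as an atomic
 * bit-clear of the complemented mask: v &= i becomes mvn (invert i)
 * followed by stclr (clear the inverted bits in v).  The fetch_and
 * variants below use the same trick with ldclr.
 */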

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)                            \
static inline int atomic_fetch_and##name(int i, atomic_t *v)            \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
        __LL_SC_ATOMIC(fetch_and##name),                                \
        /* LSE atomics */                                               \
        "       mvn     %w[i], %w[i]\n"                                 \
        "       ldclr" #mb "    %w[i], %w[i], %[v]")                    \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

static inline void atomic_sub(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        "       nop\n"
        __LL_SC_ATOMIC(sub),
        /* LSE atomics */
        "       neg     %w[i], %w[i]\n"
        "       stadd   %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}
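
/*
 * Likewise there is no atomic SUB instruction: subtraction is an atomic
 * add of the negated operand (neg + stadd), and the sub_return and
 * fetch_sub variants below follow the same pattern with ldadd.
 */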

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)                           \
static inline int atomic_sub_return##name(int i, atomic_t *v)           \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
        __LL_SC_ATOMIC(sub_return##name)                                \
        "       nop",                                                   \
        /* LSE atomics */                                               \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], w30, %[v]\n"                     \
        "       add     %w[i], %w[i], w30")                             \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)                            \
static inline int atomic_fetch_sub##name(int i, atomic_t *v)            \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
        __LL_SC_ATOMIC(fetch_sub##name),                                \
        /* LSE atomics */                                               \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], %w[i], %[v]")                    \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC

#define __LL_SC_ATOMIC64(op)    __LL_SC_CALL(atomic64_##op)
#define ATOMIC64_OP(op, asm_op)                                         \
static inline void atomic64_##op(long i, atomic64_t *v)                 \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),        \
"       " #asm_op "     %[i], %[v]\n")                                  \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS);                                            \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)
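
/*
 * The atomic64_t variants below mirror the 32-bit ones one-for-one;
 * the only differences are the long/atomic64_t types, full x-register
 * operands (%[i] instead of %w[i]) and x30 rather than w30 as the
 * scratch register in the *_return forms.
 */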

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)                  \
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)     \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(fetch_##op##name),                             \
        /* LSE atomics */                                               \
"       " #asm_op #mb " %[i], %[i], %[v]")                              \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

#define ATOMIC64_FETCH_OPS(op, asm_op)                                  \
        ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)                     \
        ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")           \
        ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")           \
        ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)                         \
static inline long atomic64_add_return##name(long i, atomic64_t *v)     \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
        __LL_SC_ATOMIC64(add_return##name),                             \
        /* LSE atomics */                                               \
        "       ldadd" #mb "    %[i], x30, %[v]\n"                      \
        "       add     %[i], %[i], x30")                               \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        "       nop\n"
        __LL_SC_ATOMIC64(and),
        /* LSE atomics */
        "       mvn     %[i], %[i]\n"
        "       stclr   %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)                          \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v)      \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
        __LL_SC_ATOMIC64(fetch_and##name),                              \
        /* LSE atomics */                                               \
        "       mvn     %[i], %[i]\n"                                   \
        "       ldclr" #mb "    %[i], %[i], %[v]")                      \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void atomic64_sub(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        "       nop\n"
        __LL_SC_ATOMIC64(sub),
        /* LSE atomics */
        "       neg     %[i], %[i]\n"
        "       stadd   %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                         \
static inline long atomic64_sub_return##name(long i, atomic64_t *v)     \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
        __LL_SC_ATOMIC64(sub_return##name)                              \
        "       nop",                                                   \
        /* LSE atomics */                                               \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], x30, %[v]\n"                      \
        "       add     %[i], %[i], x30")                               \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)                          \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)      \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
        __LL_SC_ATOMIC64(fetch_sub##name),                              \
        /* LSE atomics */                                               \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], %[i], %[v]")                      \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        register long x0 asm ("x0") = (long)v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        "       nop\n"
        __LL_SC_ATOMIC64(dec_if_positive)
        "       nop\n"
        "       nop\n"
        "       nop\n"
        "       nop\n"
        "       nop",
        /* LSE atomics */
        "1:     ldr     x30, %[v]\n"
        "       subs    %[ret], x30, #1\n"
        "       b.lt    2f\n"
        "       casal   x30, %[ret], %[v]\n"
        "       sub     x30, x30, #1\n"
        "       sub     x30, x30, %[ret]\n"
        "       cbnz    x30, 1b\n"
        "2:")
        : [ret] "+&r" (x0), [v] "+Q" (v->counter)
        :
        : __LL_SC_CLOBBERS, "cc", "memory");

        return x0;
}
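
/*
 * How the LSE path above works (explanatory note): the value is loaded,
 * decremented into %[ret], and the routine bails out early (b.lt) if
 * the result would be negative.  casal then attempts to install the
 * decremented value; since cas writes back into x30 the value it
 * actually observed in memory, the two subtractions leave x30 == 0
 * only if that observed value matched what was loaded, i.e. the cas
 * succeeded -- otherwise we retry from 1:.  casal provides full
 * ordering, and "cc"/"memory" are clobbered for the flags set by subs
 * and the barrier semantics.
 */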

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)     __LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sz, name, mb, cl...)                          \
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,   \
                                                  unsigned long old,    \
                                                  unsigned long new)    \
{                                                                       \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr;      \
        register unsigned long x1 asm ("x1") = old;                     \
        register unsigned long x2 asm ("x2") = new;                     \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
                __LL_SC_CMPXCHG(name)                                   \
        "       nop",                                                   \
        /* LSE atomics */                                               \
        "       mov     " #w "30, %" #w "[old]\n"                       \
        "       cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"         \
        "       mov     %" #w "[ret], " #w "30")                        \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)             \
        : [old] "r" (x1), [new] "r" (x2)                                \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, al, "memory")
__CMPXCHG_CASE(w, h,  mb_2, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_8, al, "memory")
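
/*
 * Illustrative expansion (a sketch, not part of the original source):
 * __CMPXCHG_CASE(w,  , acq_4,  a, "memory") above generates roughly
 *
 *      unsigned long __cmpxchg_case_acq_4(volatile void *ptr,
 *                                         unsigned long old,
 *                                         unsigned long new)
 *      {
 *              // LSE: mov w30, old; casa w30, w_new, [ptr]; return w30
 *      }
 *
 * i.e. cas{a,l,al}{b,h,} compares *ptr with "old" and, if they match,
 * stores "new"; the value previously in memory is returned either way,
 * so callers detect success by comparing the return value with "old".
 */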

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...)                                  \
static inline long __cmpxchg_double##name(unsigned long old1,           \
                                         unsigned long old2,            \
                                         unsigned long new1,            \
                                         unsigned long new2,            \
                                         volatile void *ptr)            \
{                                                                       \
        unsigned long oldval1 = old1;                                   \
        unsigned long oldval2 = old2;                                   \
        register unsigned long x0 asm ("x0") = old1;                    \
        register unsigned long x1 asm ("x1") = old2;                    \
        register unsigned long x2 asm ("x2") = new1;                    \
        register unsigned long x3 asm ("x3") = new2;                    \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr;      \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        "       nop\n"                                                  \
        "       nop\n"                                                  \
        "       nop\n"                                                  \
        __LL_SC_CMPXCHG_DBL(name),                                      \
        /* LSE atomics */                                               \
        "       casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
        "       eor     %[old1], %[old1], %[oldval1]\n"                 \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
        "       orr     %[old1], %[old1], %[old2]")                     \
        : [old1] "+r" (x0), [old2] "+r" (x1),                           \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")
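
/*
 * Note on the casp sequence (explanatory): casp{,al} compares the
 * 128-bit quantity at %[v] against the x0/x1 pair and, if equal, stores
 * the x2/x3 pair; the observed memory contents are returned in x0/x1.
 * Because casp requires an even/odd register pair, old1/old2 are pinned
 * to x0/x1 and new1/new2 to x2/x3.  The eor/orr tail then folds the
 * comparison into a single result: 0 means the observed pair matched
 * oldval1/oldval2 (success), non-zero means the exchange failed --
 * matching the return convention of the out-of-line LL/SC
 * __cmpxchg_double code.
 */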

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif  /* __ASM_ATOMIC_LSE_H */