arm64: Provide atomic_{or,xor,and}
arch/arm64/include/asm/atomic.h (karo-tx-linux.git)
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)  ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
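
/*
 * Illustrative usage (a minimal sketch; `refs' and `snapshot' are made-up
 * example names): atomic_set() compiles to a plain store and atomic_read()
 * to a plain ACCESS_ONCE() load; only the read-modify-write helpers further
 * down need the load/store-exclusive loops.
 *
 *      static atomic_t refs = ATOMIC_INIT(1);
 *
 *      atomic_set(&refs, 2);
 *      int snapshot = atomic_read(&refs);
 */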

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, asm_op)                                           \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "\n"                              \
"1:     ldxr    %w0, %2\n"                                              \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       stxr    %w1, %w0, %2\n"                                         \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \

#define ATOMIC_OP_RETURN(op, asm_op)                                    \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "_return\n"                       \
"1:     ldxr    %w0, %2\n"                                              \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       stlxr   %w1, %w0, %2\n"                                         \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : "memory");                                                    \
                                                                        \
        smp_mb();                                                       \
        return result;                                                  \
}

#define ATOMIC_OPS(op, asm_op)                                          \
        ATOMIC_OP(op, asm_op)                                           \
        ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)
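
/*
 * Rough sketch of what the generators above emit for ATOMIC_OPS(add, add)
 * (whitespace compressed; the atomic_add_return() variant is analogous):
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              unsigned long tmp;
 *              int result;
 *
 *              asm volatile("// atomic_add\n"
 *      "1:     ldxr    %w0, %2\n"              // load-exclusive old value
 *      "       add     %w0, %w0, %w3\n"        // result = old + i
 *      "       stxr    %w1, %w0, %2\n"         // try to store, tmp = fail flag
 *      "       cbnz    %w1, 1b"                // retry if the store lost
 *              : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *              : "Ir" (i));
 *      }
 *
 * atomic_add_return() differs only in using stlxr plus a trailing smp_mb(),
 * so the returned value is fully ordered.
 */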

#define CONFIG_ARCH_HAS_ATOMIC_OR
#define atomic_andnot atomic_andnot

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long tmp;
        int oldval;

        smp_mb();

        asm volatile("// atomic_cmpxchg\n"
"1:     ldxr    %w1, %2\n"
"       cmp     %w1, %w3\n"
"       b.ne    2f\n"
"       stxr    %w0, %w4, %2\n"
"       cbnz    %w0, 1b\n"
"2:"
        : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");

        smp_mb();
        return oldval;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c;
}

#define atomic_inc(v)           atomic_add(1, v)
#define atomic_dec(v)           atomic_sub(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)        ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i)       (((v)->counter) = (i))

#define ATOMIC64_OP(op, asm_op)                                         \
static inline void atomic64_##op(long i, atomic64_t *v)                 \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
"1:     ldxr    %0, %2\n"                                               \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       stxr    %w1, %0, %2\n"                                          \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \

#define ATOMIC64_OP_RETURN(op, asm_op)                                  \
static inline long atomic64_##op##_return(long i, atomic64_t *v)        \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "_return\n"                     \
"1:     ldxr    %0, %2\n"                                               \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       stlxr   %w1, %0, %2\n"                                          \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : "memory");                                                    \
                                                                        \
        smp_mb();                                                       \
        return result;                                                  \
}

#define ATOMIC64_OPS(op, asm_op)                                        \
        ATOMIC64_OP(op, asm_op)                                         \
        ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
        long oldval;
        unsigned long res;

        smp_mb();

        asm volatile("// atomic64_cmpxchg\n"
"1:     ldxr    %1, %2\n"
"       cmp     %1, %3\n"
"       b.ne    2f\n"
"       stxr    %w0, %4, %2\n"
"       cbnz    %w0, 1b\n"
"2:"
        : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");

        smp_mb();
        return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
"1:     ldxr    %0, %2\n"
"       subs    %0, %0, #1\n"
"       b.mi    2f\n"
"       stlxr   %w1, %0, %2\n"
"       cbnz    %w1, 1b\n"
"       dmb     ish\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
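
/*
 * Illustrative use of atomic64_dec_if_positive() (a minimal sketch; `budget'
 * and the -EBUSY return are made up for the example): the decrement is only
 * committed when the result stays non-negative, so a negative return value
 * means the counter was already zero (or negative) and was left untouched.
 *
 *      static atomic64_t budget = ATOMIC64_INIT(16);
 *
 *      if (atomic64_dec_if_positive(&budget) < 0)
 *              return -EBUSY;
 */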

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;

        c = atomic64_read(v);
        while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
                c = old;

        return c != u;
}

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)

#endif
#endif