/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

#ifndef atomic_read_ctrl
static inline int atomic_read_ctrl(const atomic_t *v)
{
	int val = atomic_read(v);

	smp_read_barrier_depends(); /* Enforce control dependency. */
	return val;
}
#endif

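/*
 * Illustrative sketch (not part of this header): a control dependency
 * orders the load above against stores in the dependent branch, so a
 * flag read via atomic_read_ctrl() can gate a later plain store without
 * a heavier barrier. The names "ready" and "data" are made up here.
 *
 *	if (atomic_read_ctrl(&ready))
 *		WRITE_ONCE(data, 1);	// ordered after the flag load
 */
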
/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

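/*
 * Illustrative sketch (not part of this header): a RELEASE store on the
 * producer side pairs with an ACQUIRE load on the consumer side, so
 * writes before the release are visible after a successful acquire.
 * "msg" and "flag" are made-up names.
 *
 *	// producer
 *	WRITE_ONCE(msg, 42);
 *	atomic_xchg_release(&flag, 1);
 *
 *	// consumer
 *	if (atomic_xchg_acquire(&flag, 0))
 *		r = READ_ONCE(msg);	// guaranteed to observe 42
 */
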
#ifndef atomic_read_acquire
#define  atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define  atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

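/*
 * Illustrative sketch (not part of this header): on an architecture that
 * supplies only atomic_add_return_relaxed(), the fallbacks below make
 * atomic_add_return_acquire(i, v) expand to roughly:
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	})
 */
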
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define  atomic_add_return_relaxed	atomic_add_return
#define  atomic_add_return_acquire	atomic_add_return
#define  atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define  atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define  atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define  atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define  atomic_sub_return_relaxed	atomic_sub_return
#define  atomic_sub_return_acquire	atomic_sub_return
#define  atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define  atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define  atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define  atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define  atomic_xchg_relaxed		atomic_xchg
#define  atomic_xchg_acquire		atomic_xchg
#define  atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define  atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define  atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define  atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define  atomic_cmpxchg_relaxed		atomic_cmpxchg
#define  atomic_cmpxchg_acquire		atomic_cmpxchg
#define  atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define  atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define  atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define  atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */

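/*
 * Illustrative sketch (not part of this header): a minimal trylock built
 * on atomic_cmpxchg_acquire(). As noted above, only a successful swap
 * provides ACQUIRE ordering; a failed attempt implies no ordering.
 * "my_trylock" and "lock" are made-up names.
 *
 *	static bool my_trylock(atomic_t *lock)
 *	{
 *		return atomic_cmpxchg_acquire(lock, 0, 1) == 0;
 *	}
 */
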
#ifndef atomic64_read_acquire
#define  atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define  atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define  atomic64_add_return_relaxed	atomic64_add_return
#define  atomic64_add_return_acquire	atomic64_add_return
#define  atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define  atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define  atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define  atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define  atomic64_sub_return_relaxed	atomic64_sub_return
#define  atomic64_sub_return_acquire	atomic64_sub_return
#define  atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define  atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define  atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define  atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define  atomic64_xchg_relaxed		atomic64_xchg
#define  atomic64_xchg_acquire		atomic64_xchg
#define  atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define  atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define  atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define  atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define  atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define  atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define  atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define  atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define  atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define  atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define  cmpxchg_relaxed		cmpxchg
#define  cmpxchg_acquire		cmpxchg
#define  cmpxchg_release		cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define  cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define  cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define  cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define  cmpxchg64_relaxed		cmpxchg64
#define  cmpxchg64_acquire		cmpxchg64
#define  cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define  cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define  cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define  cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define  xchg_relaxed			xchg
#define  xchg_acquire			xchg
#define  xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define  xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define  xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define  xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

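/*
 * Illustrative sketch (not part of this header): capping a counter at a
 * made-up limit; the add only happens while the limit is not yet reached.
 *
 *	if (!atomic_add_unless(&nr_users, 1, MAX_USERS))
 *		return -EBUSY;	// already at MAX_USERS, nothing added
 */
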
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif

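/*
 * Illustrative sketch (not part of this header): the classic
 * lookup-and-get pattern, taking a reference only if the object's
 * refcount has not already dropped to zero. "lookup_object" and the
 * "refs" field are made-up names.
 *
 *	obj = lookup_object(key);
 *	if (obj && !atomic_inc_not_zero(&obj->refs))
 *		obj = NULL;	// lost the race with the final put
 */
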
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

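/*
 * Illustrative sketch (not part of this header): setting and clearing a
 * made-up flag bit in an atomic word.
 *
 *	#define MY_FLAG_BUSY	(1 << 0)
 *
 *	atomic_or(MY_FLAG_BUSY, &state);	// set the bit
 *	atomic_andnot(MY_FLAG_BUSY, &state);	// clear it again
 */
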
/**
 * atomic_inc_not_zero_hint - increment if not null
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This allows the processor to avoid reading the
 * memory before doing the atomic read/modify/write cycle, lowering the
 * number of bus transactions on some arches.
 *
 * Returns: 0 if increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif

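/*
 * Illustrative sketch (not part of this header): if the counter almost
 * always holds a known value, passing it as the hint lets the first
 * cmpxchg be attempted without a prior load. "obj->refs" and the hint
 * of 1 are made up for this example.
 *
 *	if (!atomic_inc_not_zero_hint(&obj->refs, 1))
 *		return NULL;	// refcount already hit zero
 */
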
#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;
	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;
	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;
	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif

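/*
 * Illustrative sketch (not part of this header): a semaphore-style
 * "try down" on a made-up count; a negative return means the count was
 * already zero and nothing was decremented.
 *
 *	if (atomic_dec_if_positive(&sem_count) < 0)
 *		return -EAGAIN;	// no resources left
 */
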
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_read_ctrl
static inline long long atomic64_read_ctrl(const atomic64_t *v)
{
	long long val = atomic64_read(v);

	smp_read_barrier_depends(); /* Enforce control dependency. */
	return val;
}
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */