/* MN10300 Atomic counter operations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
#else

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)  READ_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

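/*
 * Usage sketch (illustrative only, not part of this header): a counter
 * is initialised with ATOMIC_INIT() and then accessed through
 * atomic_read()/atomic_set().  The names below are hypothetical.
 */
#if 0
static atomic_t example_counter = ATOMIC_INIT(0);

static void example_reset(void)
{
        int old = atomic_read(&example_counter);  /* single, torn-free read */

        if (old != 0)
                atomic_set(&example_counter, 0);  /* single, torn-free write */
}
#endif
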
#define ATOMIC_OP(op)                                                   \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        int retval, status;                                             \
                                                                        \
        asm volatile(                                                   \
                "1:     mov     %4,(_AAR,%3)    \n"                     \
                "       mov     (_ADR,%3),%1    \n"                     \
                "       " #op " %5,%1           \n"                     \
                "       mov     %1,(_ADR,%3)    \n"                     \
                "       mov     (_ADR,%3),%0    \n"     /* flush */     \
                "       mov     (_ASR,%3),%0    \n"                     \
                "       or      %0,%0           \n"                     \
                "       bne     1b              \n"                     \
                : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
                : "memory", "cc");                                      \
}

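/*
 * For reference, the retry loop above has the same shape as a portable
 * C11 compare-exchange loop (an analogy only, not this port's
 * implementation: the MN10300 atomic operations unit signals a lost
 * reservation through the _ASR status register rather than through a
 * failing CAS instruction):
 */
#if 0
#include <stdatomic.h>

static void c11_style_atomic_add(int i, _Atomic int *v)
{
        int old = atomic_load_explicit(v, memory_order_relaxed);

        /* retry until no other CPU modified *v in between, like "bne 1b" */
        while (!atomic_compare_exchange_weak(v, &old, old + i))
                ;
}
#endif
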
#define ATOMIC_OP_RETURN(op)                                            \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        int retval, status;                                             \
                                                                        \
        asm volatile(                                                   \
                "1:     mov     %4,(_AAR,%3)    \n"                     \
                "       mov     (_ADR,%3),%1    \n"                     \
                "       " #op " %5,%1           \n"                     \
                "       mov     %1,(_ADR,%3)    \n"                     \
                "       mov     (_ADR,%3),%0    \n"     /* flush */     \
                "       mov     (_ASR,%3),%0    \n"                     \
                "       or      %0,%0           \n"                     \
                "       bne     1b              \n"                     \
                : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
                : "memory", "cc");                                      \
        return retval;                                                  \
}

#define ATOMIC_FETCH_OP(op)                                             \
static inline int atomic_fetch_##op(int i, atomic_t *v)                 \
{                                                                       \
        int retval, status;                                             \
                                                                        \
        asm volatile(                                                   \
                "1:     mov     %4,(_AAR,%3)    \n"                     \
                "       mov     (_ADR,%3),%1    \n"                     \
                "       mov     %1,%0           \n"                     \
                "       " #op " %5,%0           \n"                     \
                "       mov     %0,(_ADR,%3)    \n"                     \
                "       mov     (_ADR,%3),%0    \n"     /* flush */     \
                "       mov     (_ASR,%3),%0    \n"                     \
                "       or      %0,%0           \n"                     \
                "       bne     1b              \n"                     \
                : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
                : "memory", "cc");                                      \
        return retval;                                                  \
}

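/*
 * Summary of the three templates above: ATOMIC_OP() discards the
 * result, ATOMIC_OP_RETURN() returns the new value, and
 * ATOMIC_FETCH_OP() keeps the loaded value in %1 while applying the
 * operation to a copy, so it returns the old value.
 */
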
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

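/*
 * The expansions above generate the following functions (a summary of
 * the macro instantiations, comment only):
 *
 *      atomic_add(i, v)   atomic_add_return(i, v)   atomic_fetch_add(i, v)
 *      atomic_sub(i, v)   atomic_sub_return(i, v)   atomic_fetch_sub(i, v)
 *      atomic_and(i, v)                             atomic_fetch_and(i, v)
 *      atomic_or(i, v)                              atomic_fetch_or(i, v)
 *      atomic_xor(i, v)                             atomic_fetch_xor(i, v)
 */
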
static inline int atomic_add_negative(int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static inline void atomic_inc(atomic_t *v)
{
        atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
        atomic_sub_return(1, v);
}

#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)       (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)

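/*
 * Typical usage sketch for the test helpers above (illustrative only;
 * struct example_obj and example_release() are hypothetical names):
 */
#if 0
struct example_obj {
        atomic_t refcount;
};

static void example_put(struct example_obj *obj)
{
        /* atomic_dec_and_test() is true only for the final reference */
        if (atomic_dec_and_test(&obj->refcount))
                example_release(obj);
}
#endif
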
#define __atomic_add_unless(v, a, u)                            \
({                                                              \
        int c, old;                                             \
        c = atomic_read(v);                                     \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old;                                        \
        c;                                                      \
})

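/*
 * __atomic_add_unless() returns the value observed before any addition
 * took place.  The generic linux/atomic.h wrappers build on it; as a
 * sketch of that generic pattern (not defined in this file):
 */
#if 0
/* add @a to @v unless @v was @u; true if the addition happened */
#define atomic_add_unless(v, a, u)      (__atomic_add_unless((v), (a), (u)) != (u))
/* common special case: take a reference only while the count is non-zero */
#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)
#endif
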
#define atomic_xchg(ptr, v)             (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)     (cmpxchg(&((v)->counter), (old), (new)))

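/*
 * Usage sketch for atomic_cmpxchg() (illustrative only;
 * example_inc_below() is a hypothetical helper): the classic
 * read/compute/compare-exchange retry loop, the same shape that
 * __atomic_add_unless() uses above.
 */
#if 0
static int example_inc_below(atomic_t *v, int limit)
{
        int old = atomic_read(v);

        while (old < limit) {
                int seen = atomic_cmpxchg(v, old, old + 1);

                if (seen == old)
                        return 1;       /* our update won */
                old = seen;             /* lost a race; retry with new value */
        }
        return 0;                       /* already at the limit */
}
#endif
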
#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */