/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

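/*
 * The SPARC V8 ISA has no compare-and-swap instruction, so the atomic_t
 * operations below are emulated as plain loads and stores made atomic by
 * a spinlock.  On SMP the lock is picked from a small hash table indexed
 * by bits of the target address, so unrelated atomics are usually
 * serialized by different locks; on UP a single dummy lock (which only
 * has to disable interrupts) suffices.
 */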
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		(&dummy)

#endif /* SMP */

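/*
 * Generate atomic_fetch_<op>(): apply <op> to the counter under the hash
 * lock and return the value the counter held *before* the operation.
 */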
#define ATOMIC_FETCH_OP(op, c_op)					\
int atomic_fetch_##op(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = v->counter;						\
	v->counter c_op i;						\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_fetch_##op);

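/*
 * Generate atomic_<op>_return(): like the fetch variant above, but
 * return the *new* value of the counter after the operation.
 */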
#define ATOMIC_OP_RETURN(op, c_op)					\
int atomic_##op##_return(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = (v->counter c_op i);					\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_##op##_return);

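/*
 * Instantiate the helpers: add gets both forms, the bitwise ops only
 * the fetch form.
 */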
ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

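/* Store new into v and return the previous value. */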
int atomic_xchg(atomic_t *v, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_xchg);

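/*
 * Set v to new only if it currently equals old; in either case return
 * the value read, so the caller can tell whether the swap happened.
 */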
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);

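/*
 * Add a to v unless v equals u; return the old value, which the caller
 * compares against u to see whether the addition took place.
 */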
int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);

/*
 * atomic_set() cannot be a plain store here: the read-modify-write
 * operations above run under the hash lock, so a set must take the same
 * lock or it could land in the middle of another CPU's update.
 */
void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);

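/*
 * Locked bitop backends: each of the three helpers below updates *addr
 * under the hash lock and returns the old value of the masked bits
 * (nonzero iff the bit was set), which is what the test_and_* bitops
 * need.
 */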
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);

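/*
 * 32-bit backends for the generic cmpxchg()/xchg() macros, implemented
 * with the same hash-locked load/store sequence as the atomic_t ops.
 */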
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	prev = *ptr;
	*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);