#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

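/*
 * Illustrative sketch (not from the original header; names are
 * hypothetical): the typical resource-counting use these primitives
 * are meant for.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	void grab(void)   { atomic_inc(&nr_users); }
 *	void ungrab(void) { atomic_dec(&nr_users); }
 */
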
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif

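/*
 * Added note (not in the original header): with the definitions above,
 * an operand written as ASM_DI (i) gets the "di" constraint on classic
 * m68k, letting gcc pick an immediate or a data register, but only "d"
 * (data register) on ColdFire, which lacks some immediate-to-memory
 * instruction forms.
 */
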
static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

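/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * the usual last-reference-frees-the-object pattern built on
 * atomic_dec_and_test().
 *
 *	struct my_obj {
 *		atomic_t refcount;
 *	};
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount))
 *			kfree(obj);
 *	}
 */
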
#ifdef CONFIG_RMW_INSNS

static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	/* cas loop: reload, add, retry until nobody raced with us */
	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	/* cas loop: reload, subtract, retry until nobody raced with us */
	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

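/*
 * Illustrative sketch (hypothetical example, not part of this header):
 * atomic_inc_return() hands each caller a distinct value without a
 * lock, e.g. for simple id allocation.
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	static int alloc_id(void)
 *	{
 *		return atomic_inc_return(&next_id);
 *	}
 */
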
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}

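/*
 * Illustrative sketch (hypothetical flag value and variable, not part
 * of this header): the mask helpers work on a plain word rather than
 * an atomic_t.
 *
 *	static unsigned long my_flags;
 *
 *	atomic_set_mask(0x01UL, &my_flags);	sets bit 0
 *	atomic_clear_mask(0x01UL, &my_flags);	clears bit 0
 */
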
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

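/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * atomic_inc_not_zero() is the usual "take a reference only if the
 * object is still live" step in a lookup path.
 *
 *	static struct my_obj *my_obj_get(struct my_obj *obj)
 *	{
 *		if (!atomic_inc_not_zero(&obj->refcount))
 *			return NULL;	already dead, do not touch it
 *		return obj;
 *	}
 */
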
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic-long.h>
#include <asm-generic/atomic64.h>
#endif /* __ARCH_M68K_ATOMIC__ */