/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

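/*
 * A sketch of the hashing above, for a purely hypothetical address and
 * assuming L1_CACHE_SHIFT == 5 (neither value comes from this file):
 *
 *	v    = (atomic64_t *)0x1234560
 *	addr = 0x1234560 >> 5              = 0x91a2b  (drop intra-cacheline bits)
 *	addr ^= (addr >> 8) ^ (addr >> 16) = 0x91338  (fold in higher bits)
 *	addr & (NR_LOCKS - 1)              = 0x8      -> atomic64_lock[8].lock
 *
 * Distinct atomic64_t variables may hash to the same lock; that only costs
 * some contention, never correctness.
 */
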
long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);

long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);

void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);

long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_sub_return);

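/*
 * Decrement v only if the result stays non-negative.  The value returned is
 * v->counter - 1 in either case, so a negative return means the decrement
 * was not performed.
 */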
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
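
/*
 * Illustrative use of the helpers above (hypothetical caller, not part of
 * this file): a simple 64-bit reference count.
 *
 *	static atomic64_t refs = ATOMIC64_INIT(1);
 *
 *	if (atomic64_add_unless(&refs, 1, 0))
 *		...		(got a reference)
 *	if (atomic64_dec_if_positive(&refs) == 0)
 *		...		(dropped the last reference)
 *
 * atomic64_add_unless() refuses to take a reference once the count has
 * reached zero, and atomic64_dec_if_positive() returns 0 exactly when the
 * count drops from 1 to 0.
 */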