#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!brw->fast_read_ctr)
		return;

	rcu_sync_dtor(&brw->rss);
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}
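
/*
 * Example (hypothetical caller, not part of this file): the usual
 * lifecycle is percpu_init_rwsem() at setup, with the -ENOMEM result
 * checked, and percpu_free_rwsem() at teardown:
 *
 *	struct foo {
 *		struct percpu_rw_semaphore sem;
 *	};
 *
 *	static int foo_init(struct foo *f)
 *	{
 *		return percpu_init_rwsem(&f->sem);
 *	}
 *
 *	static void foo_exit(struct foo *f)
 *	{
 *		percpu_free_rwsem(&f->sem);
 *	}
 */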

/*
 * This is the fast-path for down_read/up_read: it only needs to check
 * that there is no pending writer (rcu_sync_is_idle() == T) and inc/dec
 * the fast per-cpu counter. The writer uses rcu_sync_enter(), an
 * RCU-sched grace period, to serialize with the preempt-disabled
 * section below.
 *
 * The nontrivial part is that we should guarantee acquire/release
 * semantics in case when
 *
 *	R_W: down_write() comes after up_read(), the writer should see all
 *	     changes done by the reader
 * or
 *	W_R: down_read() comes after up_write(), the reader should see all
 *	     changes done by the writer
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 *
 * But if it succeeds we do not have any barriers, rcu_sync_is_idle() or
 * __this_cpu_add() below can be reordered with any LOAD/STORE done by
 * the reader inside the critical section. See the comments in
 * percpu_down_write() and percpu_up_write() below.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success;

	preempt_disable();
	success = rcu_sync_is_idle(&brw->rss);
	if (likely(success))
		__this_cpu_add(*brw->fast_read_ctr, val);
	preempt_enable();

	return success;
}
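
/*
 * Concretely, for the R_W case above: the reader's final
 * update_fast_ctr(-1) runs with preemption disabled, i.e. inside an
 * RCU-sched read-side section. The grace period started by the writer's
 * rcu_sync_enter() cannot complete until that section ends, and it
 * guarantees the reader executes a full memory barrier first, so the
 * writer observes both the __this_cpu_add() and the stores done inside
 * the critical section. W_R is symmetric: the fast-path becomes idle
 * again only after the grace period started by rcu_sync_exit(), which
 * orders the new readers after the writer's stores.
 */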

/*
 * Like the normal down_read() this is not recursive, the writer can
 * come after the first percpu_down_read() and create the deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();

	if (likely(update_fast_ctr(brw, +1))) {
		rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
		return;
	}

	down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	/* avoid up_read()->rwsem_release() */
	__up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);
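
/*
 * Example (hypothetical reader, not part of this file): a read-side
 * critical section is just a down/up pair; on the fast-path this costs
 * one per-cpu increment and decrement with no shared cacheline traffic:
 *
 *	percpu_down_read(&f->sem);
 *	... access the data protected by f->sem for reading ...
 *	percpu_up_read(&f->sem);
 */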

int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
	if (unlikely(!update_fast_ctr(brw, +1))) {
		if (!__down_read_trylock(&brw->rw_sem))
			return 0;
		atomic_inc(&brw->slow_read_ctr);
		__up_read(&brw->rw_sem);
	}

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
	return 1;
}
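
/*
 * Example (hypothetical caller, not part of this file): the trylock
 * variant returns 1 on success and 0 on failure, so it suits callers
 * that must not block; the error code is just an illustration:
 *
 *	if (!percpu_down_read_trylock(&f->sem))
 *		return -EAGAIN;
 *	... read-side critical section ...
 *	percpu_up_read(&f->sem);
 */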

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
EXPORT_SYMBOL_GPL(percpu_up_read);

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}
	return sum;
}
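
/*
 * Note that a per-cpu counter can be "negative": a reader may do
 * percpu_down_read() on CPU 0 and percpu_up_read() on CPU 1, leaving
 * +1 on CPU 0 and (unsigned)-1 on CPU 1. Because the counters and the
 * sum wrap modulo 2^32, the total is still exact:
 *
 *	1 + 0xffffffff == 0 (mod 2^32)
 */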

/*
 * A writer takes ->rss out of the idle state to force the readers to
 * switch to the slow mode; note the rcu_sync_is_idle() check in
 * update_fast_ctr().
 *
 * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
 * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
 * counter it represents the number of active readers.
 *
 * Finally the writer takes ->rw_sem for writing and blocks the new readers,
 * then waits until the slow counter becomes zero.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/*
	 * rcu_sync_enter() waits for an RCU-sched grace period. This:
	 *
	 * 1. Ensures that the !idle state is visible to any down_read/up_read
	 *    so that update_fast_ctr() can't succeed.
	 *
	 * 2. Ensures we see the result of every previous this_cpu_add() in
	 *    update_fast_ctr().
	 *
	 * 3. Ensures that if any reader has exited its critical section via
	 *    fast-path, it executes a full memory barrier before we return.
	 *    See R_W case in the comment above update_fast_ctr().
	 */
	rcu_sync_enter(&brw->rss);

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Insert the barrier before the next fast-path in down_read,
	 * see W_R case in the comment above update_fast_ctr().
	 */
	rcu_sync_exit(&brw->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
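
/*
 * Example (hypothetical writer, not part of this file): the write side
 * is expensive (a grace period plus waiting out all active readers), so
 * it is intended for rare, heavyweight state changes:
 *
 *	percpu_down_write(&f->sem);
 *	... readers are fully excluded here ...
 *	percpu_up_write(&f->sem);
 */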