kernel/locking/percpu-rwsem.c

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

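/*
 * Note: callers normally reach this through the percpu_init_rwsem() wrapper
 * in <linux/percpu-rwsem.h>, which supplies the name and lockdep class key.
 */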
int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!brw->fast_read_ctr)
		return;

	rcu_sync_dtor(&brw->rss);
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}

/*
 * This is the fast-path for down_read/up_read. If it succeeds we rely
 * on the barriers provided by rcu_sync_enter/exit; see the comments in
 * percpu_down_write() and percpu_up_write().
 *
 * If this helper fails, the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in that case we have the necessary barriers.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success;

	preempt_disable();
	success = rcu_sync_is_idle(&brw->rss);
	if (likely(success))
		__this_cpu_add(*brw->fast_read_ctr, val);
	preempt_enable();

	return success;
}

/*
 * Like the normal down_read(), this is not recursive: a writer can
 * come after the first percpu_down_read() and create a deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);

	if (likely(update_fast_ctr(brw, +1)))
		return;

	/* Avoid rwsem_acquire_read() and rwsem_release() */
	__down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	__up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);

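/*
 * Non-blocking variant of percpu_down_read(): returns 1 if the read lock
 * was taken, 0 if it could not be acquired without sleeping.
 */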
int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
	if (unlikely(!update_fast_ctr(brw, +1))) {
		if (!__down_read_trylock(&brw->rw_sem))
			return 0;
		atomic_inc(&brw->slow_read_ctr);
		__up_read(&brw->rw_sem);
	}

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
	return 1;
}

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
EXPORT_SYMBOL_GPL(percpu_up_read);

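/*
 * Called by percpu_down_write() with ->rw_sem held for writing and with the
 * fast-path already disabled by rcu_sync_enter(), so the per-cpu counters
 * are stable while we sum and clear them.
 */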
static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/*
	 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
	 * percpu_down_read() and percpu_up_read(), and wait for a gp pass.
	 *
	 * The latter synchronises us with the preceding readers which used
	 * the fast-path, so we cannot miss the result of __this_cpu_add()
	 * or anything else inside their critical sections.
	 */
	rcu_sync_enter(&brw->rss);

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Enable the fast-path in percpu_down_read() and percpu_up_read()
	 * but only after another gp pass; this adds the necessary barrier
	 * to ensure the reader can't miss the changes done by us.
	 */
	rcu_sync_exit(&brw->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
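
/*
 * Illustrative usage sketch (not part of this file): how a caller might pair
 * the read-side and write-side primitives above. The names my_sem, my_value,
 * my_read() and my_update() are hypothetical; the semaphore is assumed to
 * have been set up with percpu_init_rwsem(&my_sem) at init time.
 */
#if 0
static struct percpu_rw_semaphore my_sem;
static int my_value;

static int my_read(void)
{
	int val;

	percpu_down_read(&my_sem);	/* usually just a per-cpu increment */
	val = my_value;
	percpu_up_read(&my_sem);
	return val;
}

static void my_update(int val)
{
	percpu_down_write(&my_sem);	/* waits for a gp pass and all readers */
	my_value = val;
	percpu_up_write(&my_sem);
}
#endif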