/**************************************************************************
 *
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/reservation.h>
#include <linux/export.h>
38 DEFINE_WW_CLASS(reservation_ww_class);
39 EXPORT_SYMBOL(reservation_ww_class);
41 struct lock_class_key reservation_seqcount_class;
42 EXPORT_SYMBOL(reservation_seqcount_class);
44 const char reservation_seqcount_string[] = "reservation_seqcount";
45 EXPORT_SYMBOL(reservation_seqcount_string);
47 * Reserve space to add a shared fence to a reservation_object,
48 * must be called with obj->lock held.
50 int reservation_object_reserve_shared(struct reservation_object *obj)
52 struct reservation_object_list *fobj, *old;
55 old = reservation_object_get_list(obj);
57 if (old && old->shared_max) {
58 if (old->shared_count < old->shared_max) {
59 /* perform an in-place update */
64 max = old->shared_max * 2;
69 * resize obj->staged or allocate if it doesn't exist,
70 * noop if already correct size
72 fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
78 fobj->shared_max = max;
81 EXPORT_SYMBOL(reservation_object_reserve_shared);
84 reservation_object_add_shared_inplace(struct reservation_object *obj,
85 struct reservation_object_list *fobj,
93 write_seqcount_begin(&obj->seq);
95 for (i = 0; i < fobj->shared_count; ++i) {
96 struct fence *old_fence;
98 old_fence = rcu_dereference_protected(fobj->shared[i],
99 reservation_object_held(obj));
101 if (old_fence->context == fence->context) {
102 /* memory barrier is added by write_seqcount_begin */
103 RCU_INIT_POINTER(fobj->shared[i], fence);
104 write_seqcount_end(&obj->seq);
107 fence_put(old_fence);
113 * memory barrier is added by write_seqcount_begin,
114 * fobj->shared_count is protected by this lock too
116 RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
117 fobj->shared_count++;
119 write_seqcount_end(&obj->seq);
124 reservation_object_add_shared_replace(struct reservation_object *obj,
125 struct reservation_object_list *old,
126 struct reservation_object_list *fobj,
130 struct fence *old_fence = NULL;
135 RCU_INIT_POINTER(fobj->shared[0], fence);
136 fobj->shared_count = 1;
141 * no need to bump fence refcounts, rcu_read access
142 * requires the use of kref_get_unless_zero, and the
143 * references from the old struct are carried over to
146 fobj->shared_count = old->shared_count;
148 for (i = 0; i < old->shared_count; ++i) {
151 check = rcu_dereference_protected(old->shared[i],
152 reservation_object_held(obj));
154 if (!old_fence && check->context == fence->context) {
156 RCU_INIT_POINTER(fobj->shared[i], fence);
158 RCU_INIT_POINTER(fobj->shared[i], check);
161 RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
162 fobj->shared_count++;
167 write_seqcount_begin(&obj->seq);
169 * RCU_INIT_POINTER can be used here,
170 * seqcount provides the necessary barriers
172 RCU_INIT_POINTER(obj->fence, fobj);
173 write_seqcount_end(&obj->seq);
180 fence_put(old_fence);
184 * Add a fence to a shared slot, obj->lock must be held, and
185 * reservation_object_reserve_shared_fence has been called.
187 void reservation_object_add_shared_fence(struct reservation_object *obj,
190 struct reservation_object_list *old, *fobj = obj->staged;
192 old = reservation_object_get_list(obj);
196 BUG_ON(old->shared_count >= old->shared_max);
197 reservation_object_add_shared_inplace(obj, old, fence);
199 reservation_object_add_shared_replace(obj, old, fobj, fence);
201 EXPORT_SYMBOL(reservation_object_add_shared_fence);
203 void reservation_object_add_excl_fence(struct reservation_object *obj,
206 struct fence *old_fence = reservation_object_get_excl(obj);
207 struct reservation_object_list *old;
210 old = reservation_object_get_list(obj);
212 i = old->shared_count;
218 write_seqcount_begin(&obj->seq);
219 /* write_seqcount_begin provides the necessary memory barrier */
220 RCU_INIT_POINTER(obj->fence_excl, fence);
222 old->shared_count = 0;
223 write_seqcount_end(&obj->seq);
226 /* inplace update, no shared fences */
228 fence_put(rcu_dereference_protected(old->shared[i],
229 reservation_object_held(obj)));
232 fence_put(old_fence);
234 EXPORT_SYMBOL(reservation_object_add_excl_fence);
236 int reservation_object_get_fences_rcu(struct reservation_object *obj,
237 struct fence **pfence_excl,
238 unsigned *pshared_count,
239 struct fence ***pshared)
241 unsigned shared_count = 0;
243 struct fence **shared = NULL, *fence_excl = NULL;
247 struct reservation_object_list *fobj;
250 seq = read_seqcount_begin(&obj->seq);
254 fobj = rcu_dereference(obj->fence);
256 struct fence **nshared;
257 size_t sz = sizeof(*shared) * fobj->shared_max;
259 nshared = krealloc(shared, sz,
260 GFP_NOWAIT | __GFP_NOWARN);
263 nshared = krealloc(shared, sz, GFP_KERNEL);
274 memcpy(shared, fobj->shared, sz);
275 shared_count = fobj->shared_count;
278 fence_excl = rcu_dereference(obj->fence_excl);
280 retry = read_seqcount_retry(&obj->seq, seq);
284 if (!fence_excl || fence_get_rcu(fence_excl)) {
287 for (i = 0; i < shared_count; ++i) {
288 if (fence_get_rcu(shared[i]))
291 /* uh oh, refcount failed, abort and retry */
293 fence_put(shared[i]);
296 fence_put(fence_excl);
309 *pshared_count = shared_count;
316 *pfence_excl = fence_excl;
320 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
322 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
323 bool wait_all, bool intr,
324 unsigned long timeout)
327 unsigned seq, shared_count, i = 0;
331 return reservation_object_test_signaled_rcu(obj, wait_all);
336 seq = read_seqcount_begin(&obj->seq);
340 struct reservation_object_list *fobj =
341 rcu_dereference(obj->fence);
344 shared_count = fobj->shared_count;
346 if (read_seqcount_retry(&obj->seq, seq))
349 for (i = 0; i < shared_count; ++i) {
350 struct fence *lfence = rcu_dereference(fobj->shared[i]);
352 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
355 if (!fence_get_rcu(lfence))
358 if (fence_is_signaled(lfence)) {
369 struct fence *fence_excl = rcu_dereference(obj->fence_excl);
371 if (read_seqcount_retry(&obj->seq, seq))
375 !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
376 if (!fence_get_rcu(fence_excl))
379 if (fence_is_signaled(fence_excl))
380 fence_put(fence_excl);
388 ret = fence_wait_timeout(fence, intr, ret);
390 if (ret > 0 && wait_all && (i + 1 < shared_count))
399 EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
403 reservation_object_test_signaled_single(struct fence *passed_fence)
405 struct fence *fence, *lfence = passed_fence;
408 if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
409 fence = fence_get_rcu(lfence);
413 ret = !!fence_is_signaled(fence);
419 bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
422 unsigned seq, shared_count;
427 seq = read_seqcount_begin(&obj->seq);
433 struct reservation_object_list *fobj =
434 rcu_dereference(obj->fence);
437 shared_count = fobj->shared_count;
439 if (read_seqcount_retry(&obj->seq, seq))
442 for (i = 0; i < shared_count; ++i) {
443 struct fence *fence = rcu_dereference(fobj->shared[i]);
445 ret = reservation_object_test_signaled_single(fence);
453 * There could be a read_seqcount_retry here, but nothing cares
454 * about whether it's the old or newer fence pointers that are
455 * signaled. That race could still have happened after checking
456 * read_seqcount_retry. If you care, use ww_mutex_lock.
461 struct fence *fence_excl = rcu_dereference(obj->fence_excl);
463 if (read_seqcount_retry(&obj->seq, seq))
467 ret = reservation_object_test_signaled_single(
481 EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);