/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->mcs_lock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
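/*
 * Usage sketch (the 'example_dev' names below are hypothetical): a mutex
 * may be defined statically with DEFINE_MUTEX(), or embedded in an object
 * and initialized at runtime with mutex_init(), which expands to
 * __mutex_init() with a static lockdep class key.
 *
 *	static DEFINE_MUTEX(example_static_lock);
 *
 *	struct example_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void example_dev_init(struct example_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */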
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
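/*
 * Usage sketch (hypothetical names): the canonical lock/unlock pairing
 * around a short critical section. The task that locked the mutex must
 * also be the one to unlock it.
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static int example_count;
 *
 *	static void example_inc(void)
 *	{
 *		mutex_lock(&example_lock);
 *		example_count++;
 *		mutex_unlock(&example_lock);
 *	}
 */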
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * To avoid a stampede of mutex spinners acquiring the mutex more or less
 * simultaneously, each spinner needs to take an MCS lock first before
 * spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might point
	 * to free()d memory; if it still matches, the rcu_read_lock() ensures
	 * the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * as the slow path will always be taken, and it clears the owner
	 * field after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * as the slow path will always be taken, and it clears the owner
	 * field after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
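/*
 * Usage sketch (hypothetical 'example_ww_class' and objects a/b; see
 * Documentation/ww-mutex-design.txt for the full protocol): take two
 * ww_mutexes under one acquire context and back off on -EDEADLK.
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 *	ww_mutex_lock(&a->lock, &ctx);
 *	while (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		swap(a, b);
 *	}
 *	ww_acquire_done(&ctx);
 *	... both objects are now locked ...
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 */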
static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}
/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up.
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	struct mcs_spinlock node;
#endif

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using the MCS lock so that only
	 * one spinner can compete for the mutex. However, if mutex spinning
	 * isn't going to happen, there is no point in going through the
	 * lock/unlock overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	mcs_spin_lock(&lock->mcs_lock, &node);
	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock is there a guarantee that
			 * they are not invalid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}
		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			mcs_spin_unlock(&lock->mcs_lock, &node);
			preempt_enable();
			return 0;
		}
		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
	mcs_spin_unlock(&lock->mcs_lock, &node);
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	/* once more, can we acquire the lock? */
	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;
		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);
	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
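/*
 * Usage sketch (hypothetical 'example_obj' type): when two locks of the
 * same lockdep class must nest, the subclass argument tells lockdep the
 * nesting is intentional.
 *
 *	static void example_move(struct example_obj *src, struct example_obj *dst)
 *	{
 *		mutex_lock(&src->lock);
 *		mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		... transfer state from src to dst ...
 *		mutex_unlock(&dst->lock);
 *		mutex_unlock(&src->lock);
 *	}
 */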
void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
701 * Release the lock, slowpath:
703 static __used noinline void
704 __mutex_unlock_slowpath(atomic_t *lock_count)
706 __mutex_unlock_common_slowpath(lock_count, 1);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
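/*
 * Usage sketch (hypothetical 'example_dev'): a syscall-path caller that
 * backs out cleanly when a signal interrupts the wait, instead of
 * blocking uninterruptibly.
 *
 *	static long example_ioctl_op(struct example_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		... do work under dev->lock ...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */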
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}
static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
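/*
 * Usage sketch (hypothetical names): note the spin_trylock()-style return
 * convention - nonzero means the lock was taken.
 *
 *	if (mutex_trylock(&example_lock)) {
 *		... got the lock, do opportunistic work ...
 *		mutex_unlock(&example_lock);
 *	} else {
 *		... contended, fall back without blocking ...
 *	}
 */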
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we dec to 0, return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
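/*
 * Usage sketch (hypothetical object and list): dropping a reference where
 * the final put must tear down state under a mutex.
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (atomic_dec_and_mutex_lock(&obj->refcnt, &example_list_lock)) {
 *			list_del(&obj->node);
 *			mutex_unlock(&example_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */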