locking/mutexes: Unlock the mutex without the wait_lock
author Jason Low <jason.low2@hp.com>
Tue, 28 Jan 2014 19:13:14 +0000 (11:13 -0800)
committer Ingo Molnar <mingo@kernel.org>
Tue, 11 Mar 2014 11:14:54 +0000 (12:14 +0100)
On an 8-socket machine running workloads with high mutex contention, mutex
spinners would often spin for a long time with no lock owner.

The main reason this occurs is that in __mutex_unlock_common_slowpath(),
if __mutex_slowpath_needs_to_unlock() is true, the owner needs to acquire the
mutex->wait_lock before releasing the mutex (setting lock->count to 1). When
the wait_lock is contended, this delays the release of the mutex, leaving
spinners spinning on a mutex that has no owner.
We should be able to release the mutex without holding the wait_lock.
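
As a condensed userspace sketch of the new ordering (an illustrative
analogue only, not the kernel code: demo_mutex, C11 atomics and a pthread
spinlock stand in for struct mutex, atomic_set() and spin_lock_mutex(),
and the wait-list handling is elided):

#include <stdatomic.h>
#include <pthread.h>

/* Illustrative stand-in for struct mutex: count is 1 when unlocked,
 * 0 when locked, negative when locked with waiters; wait_lock mirrors
 * mutex->wait_lock and guards the (elided) wait list. */
struct demo_mutex {
	atomic_int count;
	pthread_spinlock_t wait_lock;
	int has_waiters;	/* stands in for !list_empty(&lock->wait_list) */
};

static void demo_mutex_unlock_slowpath(struct demo_mutex *lock)
{
	/* Release the mutex first, as after this patch:
	 * corresponds to atomic_set(&lock->count, 1). */
	atomic_store(&lock->count, 1);

	/* Only then take wait_lock for the wakeup bookkeeping:
	 * corresponds to spin_lock_mutex(&lock->wait_lock, flags). */
	pthread_spin_lock(&lock->wait_lock);
	if (lock->has_waiters) {
		/* wake the first waiter on the wait list (elided) */
	}
	pthread_spin_unlock(&lock->wait_lock);
}

With count set back to 1 before wait_lock is taken, an optimistic spinner
polling lock->count can acquire the mutex immediately instead of stalling
behind whoever currently holds the wait_lock.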

Signed-off-by: Jason Low <jason.low2@hp.com>
Cc: chegu_vinod@hp.com
Cc: paulmck@linux.vnet.ibm.com
Cc: Waiman.Long@hp.com
Cc: torvalds@linux-foundation.org
Cc: tglx@linutronix.de
Cc: riel@redhat.com
Cc: akpm@linux-foundation.org
Cc: davidlohr@hp.com
Cc: hpa@zytor.com
Cc: andi@firstfloor.org
Cc: aswin@hp.com
Cc: scott.norton@hp.com
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1390936396-3962-4-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/mutex.c

index 82dad2ccd40be3ecb12edb9d1fe065ea33f8822b..dc3d6f2bbe2a0a137be59ef1a50ad226c31f40e7 100644
@@ -671,10 +671,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
 
-       spin_lock_mutex(&lock->wait_lock, flags);
-       mutex_release(&lock->dep_map, nested, _RET_IP_);
-       debug_mutex_unlock(lock);
-
        /*
         * some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the later case we have to
@@ -683,6 +679,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);
 
+       spin_lock_mutex(&lock->wait_lock, flags);
+       mutex_release(&lock->dep_map, nested, _RET_IP_);
+       debug_mutex_unlock(lock);
+
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =