locking/mutex: Use acquire/release semantics
author	Davidlohr Bueso <dave@stgolabs.net>
Wed, 30 Sep 2015 20:03:12 +0000 (13:03 -0700)
committer	Ingo Molnar <mingo@kernel.org>
Tue, 6 Oct 2015 15:28:20 +0000 (17:28 +0200)
As of 654672d4ba1 (locking/atomics: Add _{acquire|release|relaxed}()
variants of some atomic operations) and 6d79ef2d30e (locking, asm-generic:
Add _{relaxed|acquire|release}() variants for 'atomic_long_t'), weakly
ordered archs can benefit from more relaxed use of barriers when locking
and unlocking, instead of regular full barrier semantics. While currently
only arm64 supports such optimizations, updating the corresponding
locking primitives lets other archs benefit immediately once the
necessary machinery is in place.
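
For readers new to the terminology, the intent can be sketched with C11
atomics (an illustrative sketch only, not the kernel's implementation):
taking a lock needs acquire ordering, so critical-section accesses cannot
be reordered before the lock operation; releasing it needs release
ordering, so they cannot be reordered after the unlock. Neither direction
requires a full barrier:

	#include <stdatomic.h>

	/* Minimal mutex-fastpath sketch using acquire/release orderings
	 * instead of full-barrier operations.  Illustrative only. */
	static atomic_int count = 1;		/* 1: unlocked, 0: locked */

	static int try_lock(void)
	{
		int expected = 1;

		/* Acquire: later critical-section accesses cannot move
		 * above a successful exchange. */
		return atomic_compare_exchange_strong_explicit(&count,
				&expected, 0,
				memory_order_acquire, memory_order_relaxed);
	}

	static void unlock(void)
	{
		/* Release: earlier critical-section accesses cannot move
		 * below this store. */
		atomic_store_explicit(&count, 1, memory_order_release);
	}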

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1443643395-17016-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/asm-generic/mutex-dec.h
include/asm-generic/mutex-xchg.h
kernel/locking/mutex.c

diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index d4f9fb4e53dfa7ea2709bc63f5c98f7c72e64cf0..fd694cfd678af712b2c3ca1055331df24a8db8ba 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -20,7 +20,7 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-       if (unlikely(atomic_dec_return(count) < 0))
+       if (unlikely(atomic_dec_return_acquire(count) < 0))
                fail_fn(count);
 }
 
@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-       if (unlikely(atomic_dec_return(count) < 0))
+       if (unlikely(atomic_dec_return_acquire(count) < 0))
                return -1;
        return 0;
 }
@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-       if (unlikely(atomic_inc_return(count) <= 0))
+       if (unlikely(atomic_inc_return_release(count) <= 0))
                fail_fn(count);
 }
 
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-       if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+       if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
                return 1;
        return 0;
 }
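
The _acquire/_release variants used above fall back to the full-barrier
ops on architectures that don't provide relaxed forms; roughly, the
machinery added by 654672d4ba1 looks like this simplified sketch:

	/* Simplified sketch of the include/linux/atomic.h fallbacks from
	 * 654672d4ba1: an arch that defines only the full-barrier
	 * atomic_dec_return() gets the relaxed/acquire/release variants
	 * aliased to it, so such archs see no behavioural change. */
	#ifndef atomic_dec_return_relaxed
	#define atomic_dec_return_relaxed	atomic_dec_return
	#define atomic_dec_return_acquire	atomic_dec_return
	#define atomic_dec_return_release	atomic_dec_return
	#endif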
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index f169ec064785beea6c8ce4f1bee1d193a9faf079..a6b4a7bd6ac9770356e066c51f295c6b9c33793f 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
                 * to ensure that any waiting tasks are woken up by the
                 * unlock slow path.
                 */
-               if (likely(atomic_xchg(count, -1) != 1))
+               if (likely(atomic_xchg_acquire(count, -1) != 1))
                        fail_fn(count);
 }
 
@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-       if (unlikely(atomic_xchg(count, 0) != 1))
+       if (unlikely(atomic_xchg_acquire(count, 0) != 1))
                if (likely(atomic_xchg(count, -1) != 1))
                        return -1;
        return 0;
@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-       if (unlikely(atomic_xchg(count, 1) != 0))
+       if (unlikely(atomic_xchg_release(count, 1) != 0))
                fail_fn(count);
 }
 
@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-       int prev = atomic_xchg(count, 0);
+       int prev = atomic_xchg_acquire(count, 0);
 
        if (unlikely(prev < 0)) {
                /*
@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
                 *   owner's unlock path needlessly, but that's not a problem
                 *   in practice. ]
                 */
-               prev = atomic_xchg(count, prev);
+               prev = atomic_xchg_acquire(count, prev);
                if (prev < 0)
                        prev = 0;
        }
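
What makes the weaker orderings sufficient is the acquire/release
pairing: everything written before the release in mutex_unlock() is
visible after the acquire in the next successful mutex_lock(). A
hypothetical usage sketch (names invented for illustration):

	#include <linux/mutex.h>

	/* Hypothetical illustration: data written under the mutex is
	 * visible to the next owner, no full barrier on either side. */
	static DEFINE_MUTEX(m);
	static int shared;

	static void writer(void)
	{
		mutex_lock(&m);		/* acquire fastpath */
		shared = 42;		/* cannot sink below the unlock */
		mutex_unlock(&m);	/* release fastpath */
	}

	static void reader(void)
	{
		mutex_lock(&m);		/* reads cannot hoist above this */
		WARN_ON(shared != 0 && shared != 42);
		mutex_unlock(&m);
	}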
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4cccea6b8934f5697fa3dfa609ae4bd10db78b6f..0551c219c40e5bb8f8f87bfb0445100fe1b31cc9 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -277,7 +277,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 static inline bool mutex_try_to_acquire(struct mutex *lock)
 {
        return !mutex_is_locked(lock) &&
-               (atomic_cmpxchg(&lock->count, 1, 0) == 1);
+               (atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
 }
 
 /*
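
The !mutex_is_locked() check ahead of the cmpxchg above is the classic
test-and-test-and-set pattern: probe with a plain load first so spinners
don't bounce the lock's cache line with failing atomic RMW operations. A
generic sketch (C11 atomics, illustrative only):

	#include <stdatomic.h>

	/* Test-and-test-and-set: a plain load filters out visibly-taken
	 * locks before attempting the more expensive atomic RMW. */
	static int trylock_sketch(atomic_int *lock)	/* 1 == unlocked */
	{
		int expected = 1;

		if (atomic_load_explicit(lock, memory_order_relaxed) != 1)
			return 0;	/* visibly locked: skip the RMW */
		return atomic_compare_exchange_strong_explicit(lock,
				&expected, 0,
				memory_order_acquire, memory_order_relaxed);
	}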
@@ -529,7 +529,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         * Once more, try to acquire the lock. Only try-lock the mutex if
         * it is unlocked to reduce unnecessary xchg() operations.
         */
-       if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
+       if (!mutex_is_locked(lock) &&
+           (atomic_xchg_acquire(&lock->count, 0) == 1))
                goto skip_wait;
 
        debug_mutex_lock_common(lock, &waiter);
@@ -553,7 +554,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * non-negative in order to avoid unnecessary xchg operations:
                 */
                if (atomic_read(&lock->count) >= 0 &&
-                   (atomic_xchg(&lock->count, -1) == 1))
+                   (atomic_xchg_acquire(&lock->count, -1) == 1))
                        break;
 
                /*
@@ -867,7 +868,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
        spin_lock_mutex(&lock->wait_lock, flags);
 
-       prev = atomic_xchg(&lock->count, -1);
+       prev = atomic_xchg_acquire(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
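
For context, the count protocol all of the above fastpaths rely on, as
documented in mutex.c of this era:

	/*
	 * mutex->count protocol:
	 *    1  -- unlocked
	 *    0  -- locked, no waiters
	 *   <0  -- locked, other tasks may be waiting
	 *
	 * Exchanging in -1 in the trylock slowpath above preserves the
	 * "waiters may exist" state, so the eventual mutex_unlock() sees
	 * a non-positive count and takes its slowpath to wake sleepers.
	 */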