s390/spinlock: remove unneeded serializations at unlock
author Christian Borntraeger <borntraeger@de.ibm.com>
Fri, 9 Oct 2015 10:34:23 +0000 (12:34 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Wed, 14 Oct 2015 12:32:25 +0000 (14:32 +0200)
The kernel locks have acquire/release semantics. No operation done
after the lock can be "moved" before the lock, and no operation before
the unlock can be moved after the unlock. But it is perfectly fine
for memory accesses that happen code-wise after the unlock to be
performed within the critical section.
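
A minimal sketch of the permitted reordering (hypothetical code, not
part of this patch):

        /* Hypothetical illustration of RELEASE semantics. */
        static DEFINE_SPINLOCK(example_lock);
        static int shared, flag;

        static void example(void)
        {
                spin_lock(&example_lock);
                shared = 1;                     /* stays inside the section */
                spin_unlock(&example_lock);     /* RELEASE operation */
                flag = 1;                       /* code-wise after the unlock,
                                                 * but may still be performed
                                                 * inside the critical section */
        }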
On s390x, reads are in-order with other reads (PoP section
"Storage-Operand Fetch References") and writes are in-order with
other writes (PoP section "Storage-Operand Store References"). Writes
are also in-order with reads to the same memory location (PoP section
"Storage-Operand Store References"). To other CPUs (and the channel
subsystem), reads additionally appear to be performed prior to reads or
writes that happen after them in the conceptual sequence (PoP section
"Relation between Operand Accesses").
So at least as observed by other CPUs and the channel subsystem, reads
inside the critical sections will not happen after unlock (and writes
are in-order anyway). That's exactly what we need for "RELEASE
operations" (memory-barriers.txt): "It guarantees that all memory
operations before the RELEASE operation will appear to happen before the
RELEASE operation with respect to the other components of the system."
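
As an illustration (a sketch with made-up variable names, not part of
this patch), these rules are what make the classic message-passing
pattern safe with a plain store as the unlock:

        /* both initially: lck = 1 (held), data = 0 (hypothetical) */

        /* CPU 0: unlock path */
        data = 42;                      /* store inside the section  */
        WRITE_ONCE(lck, 0);             /* the unlock: a plain store */

        /* CPU 1: observes the unlock */
        while (READ_ONCE(lck) != 0)
                ;                       /* spin until unlocked       */
        r = READ_ONCE(data);            /* must observe 42           */

Stores are in order with other stores, so CPU 0's store to data is
visible no later than the unlock store; and since reads are in order
with other reads, once CPU 1 observes the unlock it must also observe
data == 42, without any serialization instruction in the unlock path.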

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Sascha Silbe <silbe@linux.vnet.ibm.com>
[cross-reading and a lot of improvements to the patch description]
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/spinlock.h

index 0e37cd0412419ffeb870a8e7f0c51c5ca2833b5f..63ebf37d31438a647b8d38177b9cb107995e3e99 100644
@@ -87,7 +87,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
        typecheck(unsigned int, lp->lock);
        asm volatile(
-               __ASM_BARRIER
                "st     %1,%0\n"
                : "+Q" (lp->lock)
                : "d" (0)
@@ -169,7 +168,6 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
                                                        \
        typecheck(unsigned int *, ptr);                 \
        asm volatile(                                   \
-               "bcr    14,0\n"                         \
                op_string "     %0,%2,%1\n"             \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
@@ -243,7 +241,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
        rw->owner = 0;
        asm volatile(
-               __ASM_BARRIER
                "st     %1,%0\n"
                : "+Q" (rw->lock)
                : "d" (0)