powerpc: Remove the remaining CONFIG_PPC_ISERIES pieces
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 764094cff68172609db5ce6d4e05a1a2b8229598..7124fc06ad47d304303b93e725f0c4a1f822d0eb 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
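
Besides the iSeries include removal named in the subject, this blob-to-blob diff also picks up the switch from the bare isync / LWSYNC_ON_SMP barriers to PPC_ACQUIRE_BARRIER / PPC_RELEASE_BARRIER, and the lwarx exclusive-access hint used via PPC_LWARX(..., 1) from the newly included asm/ppc-opcode.h. As rough orientation only, the sketch below shows what the two barrier macros boil down to, judging from the code they replace in the hunks that follow; the real definitions live in asm/synch.h and include feature-fixup machinery omitted here.

    /*
     * Simplified sketch, NOT the kernel's exact definitions: roughly what
     * the barrier macros used in this file expand to, inferred from the
     * code they replace below (isync after a successful stwcx., lwsync
     * before the store that releases the lock, nothing on !CONFIG_SMP).
     */
    #ifdef CONFIG_SMP
    #define PPC_ACQUIRE_BARRIER "\n\tisync\n"  /* later accesses cannot run before the lock is seen held */
    #define PPC_RELEASE_BARRIER "\n\tlwsync\n" /* earlier accesses complete before the lock is dropped */
    #else
    #define PPC_ACQUIRE_BARRIER
    #define PPC_RELEASE_BARRIER
    #endif
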
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #include <asm/hvcall.h>
-#include <asm/iseries/hv_call.h>
 #endif
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
+#include <asm/ppc-opcode.h>
 
 #define arch_spin_is_locked(x)         ((x)->slock != 0)
 
@@ -60,13 +60,14 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 
        token = LOCK_TOKEN;
        __asm__ __volatile__(
-"1:    lwarx           %0,0,%2\n\
+"1:    " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
-       bne-            1b\n\
-       isync\n\
-2:"    : "=&r" (tmp)
+       bne-            1b\n"
+       PPC_ACQUIRE_BARRIER
+"2:"
+       : "=&r" (tmp)
        : "r" (token), "r" (&lock->slock)
        : "cr0", "memory");
 
@@ -93,12 +94,12 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
  * value.
  */
 
-#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_PPC_SPLPAR)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_lppaca()->shared_proc)
 extern void __spin_yield(arch_spinlock_t *lock);
 extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR || ISERIES */
+#else /* SPLPAR */
 #define __spin_yield(x)        barrier()
 #define __rw_yield(x)  barrier()
 #define SHARED_PROCESSOR       0
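
SHARED_PROCESSOR gates the slow path of the lock loops elsewhere in this header: when the partition shares physical CPUs, spinning on a lock whose holder may not even be running wastes the time slice, so the loop confers the processor to the hypervisor instead (__spin_yield()/__rw_yield(), declared just above). The sketch below is a userspace analogue of that pattern under assumed names, not kernel code; sched_yield() stands in for the hypervisor confer.

    /* Hypothetical userspace analogue of the "yield when on a shared
     * processor" spin loop.  The lock is an atomic_flag the caller has
     * initialised with ATOMIC_FLAG_INIT. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <sched.h>

    static bool shared_processor;       /* stands in for get_lppaca()->shared_proc */

    static void spin_yield_lock(atomic_flag *lock)
    {
        while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire)) {
            if (shared_processor)
                sched_yield();          /* give the CPU away instead of burning it */
            /* else: just spin; the holder is running on another CPU */
        }
    }

    static void spin_yield_unlock(atomic_flag *lock)
    {
        atomic_flag_clear_explicit(lock, memory_order_release);
    }
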
@@ -144,7 +145,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        SYNC_IO;
        __asm__ __volatile__("# arch_spin_unlock\n\t"
-                               LWSYNC_ON_SMP: : :"memory");
+                               PPC_RELEASE_BARRIER: : :"memory");
        lock->slock = 0;
 }
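
Taken together, the __arch_spin_trylock hunk further up (successful lwarx/stwcx. path ending in PPC_ACQUIRE_BARRIER) and arch_spin_unlock just above (PPC_RELEASE_BARRIER before the plain store that clears slock) give the usual acquire/release pairing: accesses inside the critical section can neither float above the lock acquisition nor sink below its release. A portable C11 analogue of that pairing, with illustrative names rather than anything from the kernel:

    /* Illustrative C11 analogue of the acquire/release pairing; not the
     * kernel's implementation. */
    #include <stdatomic.h>

    typedef struct { atomic_uint slock; } toy_spinlock_t;

    #define TOY_LOCK_TOKEN 1u           /* plays the role of LOCK_TOKEN */

    /* Returns 0 on success, mirroring __arch_spin_trylock() returning the
     * old (zero) lock value. */
    static unsigned int toy_spin_trylock(toy_spinlock_t *lock)
    {
        unsigned int expected = 0;

        /* CAS 0 -> token with acquire ordering: the analogue of
         * lwarx/cmpwi/stwcx. followed by PPC_ACQUIRE_BARRIER. */
        if (atomic_compare_exchange_strong_explicit(&lock->slock, &expected,
                                                    TOY_LOCK_TOKEN,
                                                    memory_order_acquire,
                                                    memory_order_relaxed))
            return 0;
        return expected;                /* non-zero: somebody else holds it */
    }

    static void toy_spin_unlock(toy_spinlock_t *lock)
    {
        /* Release store: the analogue of PPC_RELEASE_BARRIER followed by
         * the plain "lock->slock = 0". */
        atomic_store_explicit(&lock->slock, 0, memory_order_release);
    }
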
 
@@ -186,15 +187,15 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
        long tmp;
 
        __asm__ __volatile__(
-"1:    lwarx           %0,0,%1\n"
+"1:    " PPC_LWARX(%0,0,%1,1) "\n"
        __DO_SIGN_EXTEND
 "      addic.          %0,%0,1\n\
        ble-            2f\n"
        PPC405_ERR77(0,%1)
 "      stwcx.          %0,0,%1\n\
-       bne-            1b\n\
-       isync\n\
-2:"    : "=&r" (tmp)
+       bne-            1b\n"
+       PPC_ACQUIRE_BARRIER
+"2:"   : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");
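
__arch_read_trylock treats rw->lock as a signed count: readers drive it positive, a writer holds it at a negative token, so the addic./ble- pair above commits the increment only when the result stays positive, and the successful path again ends in PPC_ACQUIRE_BARRIER. A loose C11 rendering of that conditional increment (toy names, not the kernel API):

    /* Toy analogue of the read-side conditional increment; returns the new
     * count (> 0) on success, a non-positive value on failure. */
    #include <stdatomic.h>

    typedef struct { _Atomic long lock; } toy_rwlock_t;

    static long toy_read_trylock(toy_rwlock_t *rw)
    {
        long old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

        do {
            if (old < 0)                /* writer holds a negative token: give up */
                return old + 1;         /* <= 0, like the "ble- 2f" exit */
            /* try to commit old -> old + 1, acquire ordering on success */
        } while (!atomic_compare_exchange_weak_explicit(&rw->lock, &old, old + 1,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));

        return old + 1;                 /* > 0: we now hold a read lock */
    }
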
 
@@ -211,14 +212,14 @@ static inline long __arch_write_trylock(arch_rwlock_t *rw)
 
        token = WRLOCK_TOKEN;
        __asm__ __volatile__(
-"1:    lwarx           %0,0,%2\n\
+"1:    " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n"
        PPC405_ERR77(0,%1)
 "      stwcx.          %1,0,%2\n\
-       bne-            1b\n\
-       isync\n\
-2:"    : "=&r" (tmp)
+       bne-            1b\n"
+       PPC_ACQUIRE_BARRIER
+"2:"   : "=&r" (tmp)
        : "r" (token), "r" (&rw->lock)
        : "cr0", "memory");
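
The write side succeeds only when the count is exactly zero (no readers, no writer) and then installs WRLOCK_TOKEN, which must be a value the read path above refuses to increment past zero, i.e. negative. A matching toy sketch, with an illustrative token value rather than the kernel's (note the kernel helper instead returns the old value, 0 on success):

    /* Toy analogue of the write-side trylock: CAS 0 -> negative writer
     * token with acquire ordering.  Returns nonzero on success. */
    #include <stdatomic.h>

    typedef struct { _Atomic long lock; } toy_rwlock_t;   /* same toy type as above */

    #define TOY_WRLOCK_TOKEN (-1L)      /* any negative value works for the sketch */

    static int toy_write_trylock(toy_rwlock_t *rw)
    {
        long expected = 0;              /* no readers and no writer */

        return atomic_compare_exchange_strong_explicit(&rw->lock, &expected,
                                                       TOY_WRLOCK_TOKEN,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }
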
 
@@ -269,7 +270,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
        __asm__ __volatile__(
        "# read_unlock\n\t"
-       LWSYNC_ON_SMP
+       PPC_RELEASE_BARRIER
 "1:    lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n"
        PPC405_ERR77(0,%1)
@@ -283,7 +284,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        __asm__ __volatile__("# write_unlock\n\t"
-                               LWSYNC_ON_SMP: : :"memory");
+                               PPC_RELEASE_BARRIER: : :"memory");
        rw->lock = 0;
 }
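
Both unlock paths lead with PPC_RELEASE_BARRIER: arch_read_unlock then drops the reader count with a lwarx/stwcx. decrement loop, and arch_write_unlock simply stores zero, so everything done under the lock is visible before the lock is seen free. Toy C11 counterparts of the two unlocks (same illustrative type as the sketches above):

    /* Toy analogues of the unlock paths: release ordering, then drop the
     * reader count or clear the writer token.  Not kernel code. */
    #include <stdatomic.h>

    typedef struct { _Atomic long lock; } toy_rwlock_t;

    static void toy_read_unlock(toy_rwlock_t *rw)
    {
        /* Release decrement: the analogue of PPC_RELEASE_BARRIER followed
         * by the lwarx/addic/stwcx. loop. */
        atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
    }

    static void toy_write_unlock(toy_rwlock_t *rw)
    {
        /* Release store of zero: the analogue of PPC_RELEASE_BARRIER
         * followed by the plain "rw->lock = 0". */
        atomic_store_explicit(&rw->lock, 0, memory_order_release);
    }
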