]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
ARCv2: STAR 9000837815 workaround hardware exclusive transactions livelock
authorVineet Gupta <vgupta@synopsys.com>
Thu, 11 Dec 2014 10:35:16 +0000 (16:05 +0530)
committerVineet Gupta <vgupta@synopsys.com>
Thu, 25 Jun 2015 00:30:18 +0000 (06:00 +0530)
A quad core SMP build could get into hardware livelock with concurrent
LLOCK/SCOND. Work around that by adding a PREFETCHW which is serialized by
the SCU (System Coherency Unit). It brings the cache line in Exclusive state
and makes others invalidate their lines. This gives enough time for the
winner to complete the LLOCK/SCOND, before others can get the line back.

The prefetchw in the ll/sc loop is not nice, but this is the only
software workaround for the current version of the RTL.

Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
arch/arc/include/asm/atomic.h

index 20b7dc17979ea25c1b19e8513f66738fad380adc..03484cb4d16d2eb4fada0095ee427726c23bd2e1 100644 (file)
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
+#ifdef CONFIG_ISA_ARCV2
+#define PREFETCHW      "       prefetchw   [%1]        \n"
+#else
+#define PREFETCHW
+#endif
+
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
        unsigned int temp;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:     llock   %0, [%1]        \n"                             \
+       "1:                             \n"                             \
+       PREFETCHW                                                       \
+       "       llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \
@@ -50,7 +58,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v)            \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:     llock   %0, [%1]        \n"                             \
+       "1:                             \n"                             \
+       PREFETCHW                                                       \
+       "       llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \