[PATCH] ppc64: reverse prediction on spinlock busy loop code
author    Jake Moilanen <moilanen@austin.ibm.com>
          Sun, 1 May 2005 15:58:47 +0000 (08:58 -0700)
committer Linus Torvalds <torvalds@ppc970.osdl.org>
          Sun, 1 May 2005 15:58:47 +0000 (08:58 -0700)
On our raw spinlocks, we currently make one attempt at the lock, and if we
do not get it we enter a spin loop.  This spin loop will likely continue for
a while, yet its condition is currently annotated likely(), i.e. we predict
that we stay in the loop.

Shouldn't we instead predict that we will get out of the loop, so that the
instructions after it are already prefetched?  Even when the prediction
misses because the lock is still held, it doesn't matter, since we are
waiting anyway.
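
For reference, likely() and unlikely() are thin wrappers around gcc's
__builtin_expect(); a simplified sketch of the definitions from
include/linux/compiler.h (the real ones vary slightly across gcc versions):

	/* Simplified sketch; see include/linux/compiler.h */
	#define likely(x)	__builtin_expect(!!(x), 1)	/* expect x true */
	#define unlikely(x)	__builtin_expect(!!(x), 0)	/* expect x false */

With unlikely() on the do/while condition, gcc treats the loop exit as the
expected path, so the instructions following the loop sit on the predicted
fall-through path.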

I ran a couple of quick benchmarks, but the results are inconclusive.

16-way 690 running specjbb with original code
# ./specjbb 3000 16 1 1 19 30 120
    ...
Valid run, Score is 59282

16-way 690 running specjbb with unlikely() code
# ./specjbb 3000 16 1 1 19 30 120
    ...
Valid run, Score is 59541

I saw a larger relative increase on a JS20 (~1.6%, versus ~0.4% on the 690).

JS20 specjbb w/ original code
# ./specjbb 400 2 1 1 19 30 120
   ...
Valid run, Score is 20460

JS20 specjbb w/ unlikely() code
# ./specjbb 400 2 1 1 19 30 120
   ...
Valid run, Score is 20803

Anton said:

Mispredicting the spinlock busy loop also means we slow down the rate at
which we do the loads, which can be good for heavily contended locks.

Note: there are some gcc issues with branch prediction in our default build,
but a CONFIG_POWER4_ONLY build should emit the branch hints correctly.  I'm
working with Alan Modra on it now.
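
At the instruction level, the change amounts to flipping the static
prediction hint on the conditional branch that closes the loop.  An
illustrative, hand-written PowerPC fragment (not actual compiler output;
with -mcpu=power4 gcc can encode these hints in the branch instruction):

	1:	...			# HMT_low / yield / reload lock->lock
		cmpdi	r0,0
		bne+	1b		# likely():   branch back predicted taken
	vs.
		bne-	1b		# unlikely(): fall-through (exit) predicted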

Signed-off-by: Jake Moilanen <moilanen@austin.ibm.com>
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/asm-ppc64/spinlock.h

index a9b2a1162cf718f0529775cfd9788b4eee2013f9..acd11564dd752d64a8e29951e3beb55eee0f09c4 100644
@@ -110,7 +110,7 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock)
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
-               } while (likely(lock->lock != 0));
+               } while (unlikely(lock->lock != 0));
                HMT_medium();
        }
 }
@@ -128,7 +128,7 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
-               } while (likely(lock->lock != 0));
+               } while (unlikely(lock->lock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
@@ -194,7 +194,7 @@ static void __inline__ _raw_read_lock(rwlock_t *rw)
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
-               } while (likely(rw->lock < 0));
+               } while (unlikely(rw->lock < 0));
                HMT_medium();
        }
 }
@@ -251,7 +251,7 @@ static void __inline__ _raw_write_lock(rwlock_t *rw)
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
-               } while (likely(rw->lock != 0));
+               } while (unlikely(rw->lock != 0));
                HMT_medium();
        }
 }