x86: Avoid tlbstate lock if not enough cpus
author Shaohua Li <shaohua.li@intel.com>
Mon, 17 Jan 2011 02:52:10 +0000 (10:52 +0800)
committer Ingo Molnar <mingo@elte.hu>
Mon, 14 Feb 2011 12:03:08 +0000 (13:03 +0100)
This one isn't related to the previous patch. If the number of
online cpus is below NUM_INVALIDATE_TLB_VECTORS, we don't need
the lock. The comment in the code says the check isn't worth it,
but even a cache-hot lock still requires an atomic operation and
is expensive, so add the check here.

Use nr_cpu_ids here, as suggested by Eric Dumazet.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Andi Kleen <andi@firstfloor.org>
LKML-Reference: <1295232730.1949.710.camel@sli10-conroe>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
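
For illustration, below is a minimal userspace sketch (not the kernel
code) of the pattern the patch applies: when the number of senders can
never exceed the number of per-sender slots, every sender maps to its
own slot exclusively, so the slot's lock can be skipped. All names here
(NUM_SLOTS, NUM_SENDERS, struct slot, send_flush) are hypothetical
stand-ins for NUM_INVALIDATE_TLB_VECTORS, nr_cpu_ids, flush_state and
flush_tlb_others_ipi.

	/* Build with: cc -pthread sketch.c */
	#include <pthread.h>
	#include <stdio.h>

	#define NUM_SLOTS   8	/* stand-in for NUM_INVALIDATE_TLB_VECTORS */
	#define NUM_SENDERS 4	/* stand-in for nr_cpu_ids */

	struct slot {
		pthread_mutex_t lock;
		long pending;	/* stand-in for flush_mm/flush_va */
	};

	static struct slot slots[NUM_SLOTS];

	static void send_flush(int sender, long work)
	{
		struct slot *s = &slots[sender % NUM_SLOTS];

		/* Only lock when two senders can share the same slot. */
		if (NUM_SENDERS > NUM_SLOTS)
			pthread_mutex_lock(&s->lock);

		s->pending = work;	/* ...send IPIs, wait for acks... */
		s->pending = 0;

		if (NUM_SENDERS > NUM_SLOTS)
			pthread_mutex_unlock(&s->lock);
	}

	static void *sender_thread(void *arg)
	{
		send_flush((int)(long)arg, 42);
		return NULL;
	}

	int main(void)
	{
		pthread_t tid[NUM_SENDERS];
		int i;

		for (i = 0; i < NUM_SLOTS; i++)
			pthread_mutex_init(&slots[i].lock, NULL);
		for (i = 0; i < NUM_SENDERS; i++)
			pthread_create(&tid[i], NULL, sender_thread,
				       (void *)(long)i);
		for (i = 0; i < NUM_SENDERS; i++)
			pthread_join(tid[i], NULL);

		printf("done\n");
		return 0;
	}

As in the patch, the condition compares values that are fixed (here at
compile time; in the kernel nr_cpu_ids is fixed after boot), so the
branch is cheap and well predicted, unlike the atomic operation a
cache-hot lock would still require.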
arch/x86/mm/tlb.c

index 6acc724d5d8ff759f93290a2591c84945f9e6bd2..55272d7c3b0b2a820c7b82e30f20c748ab52f7e1 100644
@@ -179,12 +179,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
        sender = this_cpu_read(tlb_vector_offset);
        f = &flush_state[sender];
 
-       /*
-        * Could avoid this lock when
-        * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-        * probably not worth checking this for a cache-hot lock.
-        */
-       raw_spin_lock(&f->tlbstate_lock);
+       if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
+               raw_spin_lock(&f->tlbstate_lock);
 
        f->flush_mm = mm;
        f->flush_va = va;
@@ -202,7 +198,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 
        f->flush_mm = NULL;
        f->flush_va = 0;
-       raw_spin_unlock(&f->tlbstate_lock);
+       if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
+               raw_spin_unlock(&f->tlbstate_lock);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,