x86: Store a per-cpu shadow copy of CR4
author Andy Lutomirski <luto@amacapital.net>
Fri, 24 Oct 2014 22:58:08 +0000 (15:58 -0700)
committer Ingo Molnar <mingo@kernel.org>
Wed, 4 Feb 2015 11:10:42 +0000 (12:10 +0100)
Context switches and TLB flushes can change individual bits of CR4.
CR4 reads take several cycles, so store a shadow copy of CR4 in a
per-cpu variable.

To avoid wasting a cache line, I added the CR4 shadow to
cpu_tlbstate, which is already touched in switch_mm.  The heaviest
users of the CR4 shadow will be switch_mm and __switch_to_xtra, and
__switch_to_xtra is called shortly after switch_mm during context
switch, so the cacheline is likely to be hot.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Vince Weaver <vince@deater.net>
Cc: "hillf.zj" <hillf.zj@alibaba-inc.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/3a54dd3353fffbf84804398e00dfdc5b7c1afd7d.1414190806.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
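
In miniature, the caching idea looks like this (an illustrative sketch only; the names read_cr4_cached/write_cr4_cached are hypothetical, while the real patch folds the shadow into cpu_tlbstate and renames the raw accessors to __read_cr4()/__write_cr4(), as the diff below shows):

/* Sketch of the shadow pattern; not code from the patch. */
static DEFINE_PER_CPU(unsigned long, cr4_shadow);

static inline unsigned long read_cr4_cached(void)
{
        /* A cheap per-cpu load instead of a multi-cycle mov from %cr4. */
        return this_cpu_read(cr4_shadow);
}

static inline void write_cr4_cached(unsigned long val)
{
        /* Keep the cached copy in sync with the hardware register. */
        this_cpu_write(cr4_shadow, val);
        __write_cr4(val);
}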
20 files changed:
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/special_insns.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/virtext.h
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mtrr/cyrix.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/setup.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/tlb.c
arch/x86/power/cpu.c
arch/x86/realmode/init.c

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 32444ae939ca90494e4cf913eb9ea8be1113f6a4..965c47d254aa0f68ea83e4dc35cd983cc9ff789b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -80,16 +80,16 @@ static inline void write_cr3(unsigned long x)
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 }
 
-static inline unsigned long read_cr4(void)
+static inline unsigned long __read_cr4(void)
 {
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
 }
-static inline unsigned long read_cr4_safe(void)
+static inline unsigned long __read_cr4_safe(void)
 {
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
 }
 
-static inline void write_cr4(unsigned long x)
+static inline void __write_cr4(unsigned long x)
 {
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
 }
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index e820c080a4e99e45354fc7b6e5720d9f1933b241..6a4b00fafb003cbcf4e2bdc62a244ee71dc75a45 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -137,17 +137,17 @@ static inline void write_cr3(unsigned long x)
        native_write_cr3(x);
 }
 
-static inline unsigned long read_cr4(void)
+static inline unsigned long __read_cr4(void)
 {
        return native_read_cr4();
 }
 
-static inline unsigned long read_cr4_safe(void)
+static inline unsigned long __read_cr4_safe(void)
 {
        return native_read_cr4_safe();
 }
 
-static inline void write_cr4(unsigned long x)
+static inline void __write_cr4(unsigned long x)
 {
        native_write_cr4(x);
 }
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index fc0c4bc356ce45ad663eae11a2d433a118ef38c3..cd791948b286a13a7c5cf35e71662cb8066d697a 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -15,14 +15,37 @@
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif
 
+struct tlb_state {
+#ifdef CONFIG_SMP
+       struct mm_struct *active_mm;
+       int state;
+#endif
+
+       /*
+        * Access to this CR4 shadow and to H/W CR4 is protected by
+        * disabling interrupts when modifying either one.
+        */
+       unsigned long cr4;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+
+/* Initialize cr4 shadow for this CPU. */
+static inline void cr4_init_shadow(void)
+{
+       this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+}
+
 /* Set in this cpu's CR4. */
 static inline void cr4_set_bits(unsigned long mask)
 {
        unsigned long cr4;
 
-       cr4 = read_cr4();
-       cr4 |= mask;
-       write_cr4(cr4);
+       cr4 = this_cpu_read(cpu_tlbstate.cr4);
+       if ((cr4 | mask) != cr4) {
+               cr4 |= mask;
+               this_cpu_write(cpu_tlbstate.cr4, cr4);
+               __write_cr4(cr4);
+       }
 }
 
 /* Clear in this cpu's CR4. */
@@ -30,9 +53,18 @@ static inline void cr4_clear_bits(unsigned long mask)
 {
        unsigned long cr4;
 
-       cr4 = read_cr4();
-       cr4 &= ~mask;
-       write_cr4(cr4);
+       cr4 = this_cpu_read(cpu_tlbstate.cr4);
+       if ((cr4 & ~mask) != cr4) {
+               cr4 &= ~mask;
+               this_cpu_write(cpu_tlbstate.cr4, cr4);
+               __write_cr4(cr4);
+       }
+}
+
+/* Read the CR4 shadow. */
+static inline unsigned long cr4_read_shadow(void)
+{
+       return this_cpu_read(cpu_tlbstate.cr4);
 }
 
 /*
@@ -61,7 +93,7 @@ static inline void __native_flush_tlb_global_irq_disabled(void)
 {
        unsigned long cr4;
 
-       cr4 = native_read_cr4();
+       cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* clear PGE */
        native_write_cr4(cr4 & ~X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
@@ -221,12 +253,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 #define TLBSTATE_OK    1
 #define TLBSTATE_LAZY  2
 
-struct tlb_state {
-       struct mm_struct *active_mm;
-       int state;
-};
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
-
 static inline void reset_lazy_tlbstate(void)
 {
        this_cpu_write(cpu_tlbstate.state, 0);
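
For context, a hypothetical caller would now toggle CR4 bits through the shadow-aware helpers above rather than a raw read/modify/write of the register; a minimal sketch (the example function and the pr_info message are not part of the patch):

/* Hypothetical caller of the new helpers; not from this patch. */
static void example_toggle_tsd(void)
{
        unsigned long flags;

        /*
         * Per the comment in struct tlb_state, the shadow and hardware
         * CR4 may only be modified with interrupts disabled.
         */
        local_irq_save(flags);
        cr4_set_bits(X86_CR4_TSD);      /* writes CR4 only if TSD was clear */
        cr4_clear_bits(X86_CR4_TSD);    /* writes CR4 only if TSD was set */
        local_irq_restore(flags);

        /* Reading back costs a per-cpu load, not a CR4 access. */
        pr_info("CR4: %08lx\n", cr4_read_shadow());
}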
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index f41e19ca717b0bbffb5d9f0f7e3402113a77f76e..cce9ee68e335f86e4bf8ca54784924c4f7202196 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -46,7 +46,7 @@ static inline void cpu_vmxoff(void)
 
 static inline int cpu_vmx_enabled(void)
 {
-       return read_cr4() & X86_CR4_VMXE;
+       return __read_cr4() & X86_CR4_VMXE;
 }
 
 /** Disable VMX if it is enabled on the current CPU
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 31368207837c2fbcd93f73d94ccf9c2d572d5cf5..d1daead5fcddd57ab4cd0675315420bf52608584 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -78,7 +78,7 @@ int x86_acpi_suspend_lowlevel(void)
 
        header->pmode_cr0 = read_cr0();
        if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
-               header->pmode_cr4 = read_cr4();
+               header->pmode_cr4 = __read_cr4();
                header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
        }
        if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9d8fc49f092277719363234939ea737e1a6a0b50..07f2fc3c13a4d614b76fdad3b99ee6ec8e9c3683 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -19,6 +19,7 @@
 #include <asm/archrandom.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
+#include <asm/tlbflush.h>
 #include <asm/debugreg.h>
 #include <asm/sections.h>
 #include <asm/vsyscall.h>
@@ -1293,6 +1294,12 @@ void cpu_init(void)
 
        wait_for_master_cpu(cpu);
 
+       /*
+        * Initialize the CR4 shadow before doing anything that could
+        * try to read it.
+        */
+       cr4_init_shadow();
+
        /*
         * Load microcode on this cpu if a valid microcode is available.
         * This is early microcode loading procedure.
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 9e451b0876b513ff34c38ba5cad0862b1f5fc3db..f8c81ba0b4651c02cd31f9a1117e64192e64e1d0 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -138,8 +138,8 @@ static void prepare_set(void)
 
        /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
        if (cpu_has_pge) {
-               cr4 = read_cr4();
-               write_cr4(cr4 & ~X86_CR4_PGE);
+               cr4 = __read_cr4();
+               __write_cr4(cr4 & ~X86_CR4_PGE);
        }
 
        /*
@@ -171,7 +171,7 @@ static void post_set(void)
 
        /* Restore value of CR4 */
        if (cpu_has_pge)
-               write_cr4(cr4);
+               __write_cr4(cr4);
 }
 
 static void cyrix_set_arr(unsigned int reg, unsigned long base,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0e25a1bc5ab5cfbbf21484ce268ad17ed48844ec..7d74f7b3c6ba49ee5f3ef9baa93e9eb03a906992 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -678,8 +678,8 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if (cpu_has_pge) {
-               cr4 = read_cr4();
-               write_cr4(cr4 & ~X86_CR4_PGE);
+               cr4 = __read_cr4();
+               __write_cr4(cr4 & ~X86_CR4_PGE);
        }
 
        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
@@ -708,7 +708,7 @@ static void post_set(void) __releases(set_atomicity_lock)
 
        /* Restore value of CR4 */
        if (cpu_has_pge)
-               write_cr4(cr4);
+               __write_cr4(cr4);
        raw_spin_unlock(&set_atomicity_lock);
 }
 
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index d6c1b983699576523aacb5b8066d48311eb94a67..2911ef3a9f1c7bf11a2508a928cb216c5f48b6bc 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -31,6 +31,7 @@ static void __init i386_default_early_setup(void)
 
 asmlinkage __visible void __init i386_start_kernel(void)
 {
+       cr4_init_shadow();
        sanitize_boot_params(&boot_params);
 
        /* Call the subarch specific early setup function */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index eda1a865641e2e86f01961265619ab0484b96631..3b241f0ca005fcfc9a157d2bdff04ad69f0d34f8 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -155,6 +155,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
                                (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
 
+       cr4_init_shadow();
+
        /* Kill off the identity-map trampoline */
        reset_early_page_tables();
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8f3ebfe710d0715d9b876fa6f0d551933f30a225..603c4f99cb5a17f83e65c3066a5642a4bb9d0f42 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -101,7 +101,7 @@ void __show_regs(struct pt_regs *regs, int all)
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
-       cr4 = read_cr4_safe();
+       cr4 = __read_cr4_safe();
        printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 5a2c02913af3bd43dfc1be40947f176e0542422f..67fcc43577d279faa02941dd56dbf36f270a9832 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -93,7 +93,7 @@ void __show_regs(struct pt_regs *regs, int all)
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
-       cr4 = read_cr4();
+       cr4 = __read_cr4();
 
        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ab4734e5411d76daa06624afc6d1eb4325d1a128..04e6c62f1a9386535f9b1e4f5156e37ec14f402d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1178,7 +1178,7 @@ void __init setup_arch(char **cmdline_p)
 
        if (boot_cpu_data.cpuid_level >= 0) {
                /* A CPU has %cr4 if and only if it has CPUID */
-               mmu_cr4_features = read_cr4();
+               mmu_cr4_features = __read_cr4();
                if (trampoline_cr4_features)
                        *trampoline_cr4_features = mmu_cr4_features;
        }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 41dd0387cccb639b49fcb99c99035d3691f2464a..496a54839968e4c8389b67c676f2a3577a3bc785 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1583,7 +1583,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-       unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
+       unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
 
        if (cr4 & X86_CR4_VMXE)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index db77537013d1c246345a1c381e961b4abbed7b26..8dca6ccbb9cefcdb960d8e4ceb62366ef4729ebc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2785,7 +2785,7 @@ static int hardware_enable(void)
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
        u64 old, test_bits;
 
-       if (read_cr4() & X86_CR4_VMXE)
+       if (cr4_read_shadow() & X86_CR4_VMXE)
                return -EBUSY;
 
        INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
@@ -4255,7 +4255,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
        vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
        /* Save the most likely value for this task's CR4 in the VMCS. */
-       cr4 = read_cr4();
+       cr4 = cr4_read_shadow();
        vmcs_writel(HOST_CR4, cr4);                     /* 22.2.3, 22.2.5 */
        vmx->host_state.vmcs_host_cr4 = cr4;
 
@@ -7784,7 +7784,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
-       cr4 = read_cr4();
+       cr4 = cr4_read_shadow();
        if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
                vmcs_writel(HOST_CR4, cr4);
                vmx->host_state.vmcs_host_cr4 = cr4;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e3ff27a5b6348ffb2dcff6f592abafe48b6b6396..ede025fb46f137ed7576cd5a06547264356c25b0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -600,7 +600,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                        printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
                if (pte && pte_present(*pte) && pte_exec(*pte) &&
                                (pgd_flags(*pgd) & _PAGE_USER) &&
-                               (read_cr4() & X86_CR4_SMEP))
+                               (__read_cr4() & X86_CR4_SMEP))
                        printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
        }
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d4eddbd92c287c73393e3dea524c4baa2a578f5b..a74aa0fd185332a110eb2d7b04607c0fabd4dc30 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -713,6 +713,15 @@ void __init zone_sizes_init(void)
        free_area_init_nodes(max_zone_pfns);
 }
 
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+#ifdef CONFIG_SMP
+       .active_mm = &init_mm,
+       .state = 0,
+#endif
+       .cr4 = ~0UL,    /* fail hard if we screw up cr4 shadow initialization */
+};
+EXPORT_SYMBOL_GPL(cpu_tlbstate);
+
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
 {
        /* entry 0 MUST be WB (hardwired to speed up translations) */
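
The ~0UL initializer above is a poison value; a hedged illustration of the failure mode it is meant to force (a hypothetical early-boot bug, not code from the patch):

/* Hypothetical misuse that the ~0UL poison turns into a hard fault. */
static void __init example_premature_cr4_write(void)
{
        /*
         * Before cr4_init_shadow() runs, the shadow still holds ~0UL,
         * so this ends up as __write_cr4(~0UL & ~X86_CR4_PGE): a value
         * with reserved CR4 bits set, which raises #GP immediately
         * instead of letting the shadow and hardware silently diverge.
         */
        cr4_clear_bits(X86_CR4_PGE);
}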
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ee61c36d64f84dd944873ec0acf80b4f8ad06da7..3250f2371aea5c9f2c8f8f19d4f6535627e0e188 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,9 +14,6 @@
 #include <asm/uv/uv.h>
 #include <linux/debugfs.h>
 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
-                       = { &init_mm, 0, };
-
 /*
  *     Smarter SMP flushing macros.
  *             c/o Linus Torvalds.
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 6ec7910f59bfe2e0b831eedd772b36a5c5cebc35..3e32ed5648a03894604976c4a9b2ae5c155c1750 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -105,11 +105,8 @@ static void __save_processor_state(struct saved_context *ctxt)
        ctxt->cr0 = read_cr0();
        ctxt->cr2 = read_cr2();
        ctxt->cr3 = read_cr3();
-#ifdef CONFIG_X86_32
-       ctxt->cr4 = read_cr4_safe();
-#else
-/* CONFIG_X86_64 */
-       ctxt->cr4 = read_cr4();
+       ctxt->cr4 = __read_cr4_safe();
+#ifdef CONFIG_X86_64
        ctxt->cr8 = read_cr8();
 #endif
        ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
@@ -175,12 +172,12 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        /* cr4 was introduced in the Pentium CPU */
 #ifdef CONFIG_X86_32
        if (ctxt->cr4)
-               write_cr4(ctxt->cr4);
+               __write_cr4(ctxt->cr4);
 #else
 /* CONFIG X86_64 */
        wrmsrl(MSR_EFER, ctxt->efer);
        write_cr8(ctxt->cr8);
-       write_cr4(ctxt->cr4);
+       __write_cr4(ctxt->cr4);
 #endif
        write_cr3(ctxt->cr3);
        write_cr2(ctxt->cr2);
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index bad628a620c4ef7647986bf22f63402b7d451be2..0b7a63d9844038a02492ae5ddf0847b51d51bdd7 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -81,7 +81,7 @@ void __init setup_real_mode(void)
 
        trampoline_header->start = (u64) secondary_startup_64;
        trampoline_cr4_features = &trampoline_header->cr4;
-       *trampoline_cr4_features = read_cr4();
+       *trampoline_cr4_features = __read_cr4();
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
        trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;