Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 14 Apr 2013 18:13:24 +0000 (11:13 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 14 Apr 2013 18:13:24 +0000 (11:13 -0700)
Pull x86 fixes from Ingo Molnar:
 "Misc fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Flush lazy MMU when DEBUG_PAGEALLOC is set
  x86/mm/cpa/selftest: Fix false positive in CPA self test
  x86/mm/cpa: Convert noop to functional fix
  x86, mm: Patch out arch_flush_lazy_mmu_mode() when running on bare metal
  x86, mm, paravirt: Fix vmalloc_fault oops during lazy MMU updates

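Three of the five fixes revolve around the paravirt "lazy MMU" mode: under a hypervisor such as Xen or lguest, page-table updates may be queued and applied as one batch, so any code path that needs an update visible immediately has to flush the pending batch first (the two CPA changes are separate; see the pageattr notes further down). Below is a minimal user-space C model of that idiom; every name in it (lazy_enter, lazy_leave, lazy_flush, set_pte_model) is invented for illustration and is not the kernel's API.

#include <stdio.h>
#include <stdbool.h>

#define QUEUE_MAX 16

static unsigned long queue[QUEUE_MAX];  /* batched "page-table updates" */
static int queued;
static bool lazy;                       /* stands in for PARAVIRT_LAZY_MMU */

static void apply_all(void)
{
        for (int i = 0; i < queued; i++)
                printf("apply update %#lx\n", queue[i]);
        queued = 0;
}

static void lazy_enter(void) { lazy = true; }

static void lazy_leave(void)
{
        apply_all();                    /* leaving lazy mode issues the batch */
        lazy = false;
}

/* Mirrors the flush idiom these patches add: flush == leave + re-enter. */
static void lazy_flush(void)
{
        if (lazy) {
                lazy_leave();
                lazy_enter();
        }
}

static void set_pte_model(unsigned long v)
{
        if (lazy && queued < QUEUE_MAX)
                queue[queued++] = v;    /* deferred, not yet visible */
        else
                printf("apply update %#lx\n", v);
}

int main(void)
{
        lazy_enter();
        set_pte_model(0x1000);
        set_pte_model(0x2000);
        lazy_flush();                   /* a fault handler needs these visible now */
        set_pte_model(0x3000);
        lazy_leave();
        return 0;
}
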
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/kernel/paravirt.c
arch/x86/lguest/boot.c
arch/x86/mm/fault.c
arch/x86/mm/pageattr-test.c
arch/x86/mm/pageattr.c
arch/x86/xen/mmu.c

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5edd1742cfd02a9368ca28e604366fb428904ad8..7361e47db79f50158ada1cdde859c4871b99ee51 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+       PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
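
Turning arch_flush_lazy_mmu_mode() into a PVOP_VCALL0() inline puts the call under the paravirt patching machinery, which is what lets the "Patch out arch_flush_lazy_mmu_mode() when running on bare metal" change reduce it to a no-op on native hardware. A hedged sketch of the underlying ops-table idea, with all *_model names invented for illustration (the real kernel additionally binary-patches the call site):

#include <stdio.h>

static void nop_model(void) { /* native: nothing to flush */ }

static void xen_flush_model(void)
{
        printf("hypercall: flush batched updates\n");
}

struct lazy_ops_model {
        void (*flush)(void);
};

static struct lazy_ops_model lazy_ops = { .flush = nop_model };

/* Analogue of PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush) */
static inline void flush_lazy_mmu_model(void)
{
        lazy_ops.flush();
}

int main(void)
{
        flush_lazy_mmu_model();                 /* bare metal: no-op */
        lazy_ops.flush = xen_flush_model;       /* a hypervisor rewires it at boot */
        flush_lazy_mmu_model();
        return 0;
}
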
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 142236ed83af580c06e2e00893501e3fc38b0e65..b3b0ec1dac86d91ac6289b961afefe65f1f48d11 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
+       void (*flush)(void);
 };
 
 struct pv_time_ops {
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
 
 void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
 
 void _paravirt_nop(void);
 u32 _paravirt_ident_32(u32);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 17fff18a103104431c0e10e1543af6f027f64f72..8bfb335f74bb377776adb4b6b1da98dc17ed82ae 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
        leave_lazy(PARAVIRT_LAZY_MMU);
 }
 
+void paravirt_flush_lazy_mmu(void)
+{
+       preempt_disable();
+
+       if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+               arch_leave_lazy_mmu_mode();
+               arch_enter_lazy_mmu_mode();
+       }
+
+       preempt_enable();
+}
+
 void paravirt_start_context_switch(struct task_struct *prev)
 {
        BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
        return this_cpu_read(paravirt_lazy_mode);
 }
 
-void arch_flush_lazy_mmu_mode(void)
-{
-       preempt_disable();
-
-       if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-               arch_leave_lazy_mmu_mode();
-               arch_enter_lazy_mmu_mode();
-       }
-
-       preempt_enable();
-}
-
 struct pv_info pv_info = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
        .lazy_mode = {
                .enter = paravirt_nop,
                .leave = paravirt_nop,
+               .flush = paravirt_nop,
        },
 
        .set_fixmap = native_set_fixmap,
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 1cbd89ca5569f2ce5d76a4bed1f21d6d52ed63e5..7114c63f047d06432a453884c4500474ea9681ea 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
        pv_mmu_ops.read_cr3 = lguest_read_cr3;
        pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
        pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
+       pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
        pv_mmu_ops.pte_update = lguest_pte_update;
        pv_mmu_ops.pte_update_defer = lguest_pte_update;
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2b97525246d43c3397f9d2149adf859b24b2842b..0e883364abb5752d4ac07b031d241d6bd00585ab 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
        if (pgd_none(*pgd_ref))
                return -1;
 
-       if (pgd_none(*pgd))
+       if (pgd_none(*pgd)) {
                set_pgd(pgd, *pgd_ref);
-       else
+               arch_flush_lazy_mmu_mode();
+       } else {
                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+       }
 
        /*
         * Below here mismatches are bugs because these lower tables
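
This hunk is the "Fix vmalloc_fault oops during lazy MMU updates" change: if the fault handler runs while lazy MMU mode is batching updates, the set_pgd() above may only be queued, the missing entry never materializes, and the fault recurs. Flushing right after the sync forces the update out. A self-contained toy model of that failure mode, with every name invented:

#include <stdio.h>
#include <stdbool.h>

static bool entry_queued;       /* update sits in the lazy batch */
static bool entry_applied;      /* visible to the "hardware walker" */

static void set_entry_lazily(void) { entry_queued = true; }

static void flush_batch(void)
{
        if (entry_queued) {
                entry_applied = true;
                entry_queued = false;
        }
}

static int fault_model(void)
{
        if (!entry_applied) {
                set_entry_lazily();     /* like set_pgd(pgd, *pgd_ref) */
                flush_batch();          /* the fix: without this, the fault recurs */
        }
        return entry_applied ? 0 : -1;
}

int main(void)
{
        printf("fault resolved: %s\n", fault_model() == 0 ? "yes" : "no");
        return 0;
}
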
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index b0086567271c9956b2196e3ba302e4a77009e811..0e38951e65eb0940bf57a3f6d8c8d01dff0674c7 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
                        s->gpg++;
                        i += GPS/PAGE_SIZE;
                } else if (level == PG_LEVEL_2M) {
-                       if (!(pte_val(*pte) & _PAGE_PSE)) {
+                       if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
                                printk(KERN_ERR
                                        "%lx level %d but not PSE %Lx\n",
                                        addr, level, (u64)pte_val(*pte));
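
The selftest fix ("Fix false positive in CPA self test") tightens the predicate: the bits of a non-present 2M-level entry carry no meaning to the hardware, so complaining about a missing PSE bit there was a false positive. A tiny model of the corrected check; the bit positions match x86's _PAGE_PRESENT and _PAGE_PSE, while looks_broken() is an invented name:

#include <stdio.h>

#define PRESENT_BIT (1UL << 0)  /* _PAGE_PRESENT */
#define PSE_BIT     (1UL << 7)  /* _PAGE_PSE */

static int looks_broken(unsigned long pte)
{
        /* old check: !(pte & PSE_BIT) -- fired on non-present entries too */
        return (pte & PRESENT_BIT) && !(pte & PSE_BIT);
}

int main(void)
{
        printf("%d\n", looks_broken(0));                        /* 0: not present, ignore */
        printf("%d\n", looks_broken(PRESENT_BIT));              /* 1: genuinely broken */
        printf("%d\n", looks_broken(PRESENT_BIT | PSE_BIT));    /* 0: fine */
        return 0;
}
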
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 091934e1d0d97ca26570eb9962aa1890e08e7bd8..fb4e73ec24d8d1b8b894b6c09f7c082a49dc1dcf 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
-       old_prot = new_prot = req_prot = pte_pgprot(old_pte);
+       old_prot = req_prot = pte_pgprot(old_pte);
 
        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
         * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
         * for the ancient hardware that doesn't support it.
         */
-       if (pgprot_val(new_prot) & _PAGE_PRESENT)
-               pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+       if (pgprot_val(req_prot) & _PAGE_PRESENT)
+               pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
        else
-               pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+               pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
 
-       new_prot = canon_pgprot(new_prot);
+       req_prot = canon_pgprot(req_prot);
 
        /*
         * old_pte points to the large page base address. So we need
@@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
         * but that can deadlock->flush only current cpu:
         */
        __flush_tlb_all();
+
+       arch_flush_lazy_mmu_mode();
 }
 
 #ifdef CONFIG_HIBERNATION
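
Two fixes meet in this file. The new_prot -> req_prot switch is the "Convert noop to functional fix" patch: per the commit title the old adjustment had no effect, the likely reason being that new_prot is recomputed from req_prot further down in try_preserve_large_page(), so modifying new_prot here was silently discarded; operating on req_prot makes the PSE/GLOBAL fixup take effect. The kernel_map_pages() hunk is the DEBUG_PAGEALLOC flush: mapping changes there must become visible immediately, so any lazily batched updates are flushed after the TLB flush. A toy demonstration of the no-op pattern, with arbitrary values:

#include <stdio.h>

int main(void)
{
        unsigned long req_prot = 0x1;
        unsigned long new_prot;

        /* old shape, a no-op: adjust new_prot ... */
        new_prot = req_prot | 0x80;
        /* ... which is then recomputed from req_prot further down */
        new_prot = req_prot;
        printf("old shape: %#lx\n", new_prot);  /* 0x1: adjustment lost */

        /* new shape: adjust req_prot itself before it is consumed */
        req_prot |= 0x80;
        new_prot = req_prot;
        printf("new shape: %#lx\n", new_prot);  /* 0x81: adjustment kept */
        return 0;
}
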
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a4ea92477e016e252ca96037a9654fed4e399e57..e006c18d288a39ebec3343b2c240eced7a6fec34 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2200,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .lazy_mode = {
                .enter = paravirt_enter_lazy_mmu,
                .leave = xen_leave_lazy_mmu,
+               .flush = paravirt_flush_lazy_mmu,
        },
 
        .set_fixmap = xen_set_fixmap,