Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 4 Nov 2015 05:23:56 +0000 (21:23 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 4 Nov 2015 05:23:56 +0000 (21:23 -0800)
Pull x86 mm changes from Ingo Molnar:
 "The main changes are: continued PAT work by Toshi Kani, plus a new
  boot time warning about insecure RWX kernel mappings, by Stephen
  Smalley.

  The new CONFIG_DEBUG_WX=y warning is marked default-y if
  CONFIG_DEBUG_RODATA=y is already enabled, as a special exception, as
  these bugs are hard to notice and this check already found several
  live bugs"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Warn on W^X mappings
  x86/mm: Fix no-change case in try_preserve_large_page()
  x86/mm: Fix __split_large_page() to handle large PAT bit
  x86/mm: Fix try_preserve_large_page() to handle large PAT bit
  x86/mm: Fix gup_huge_p?d() to handle large PAT bit
  x86/mm: Fix slow_virt_to_phys() to handle large PAT bit
  x86/mm: Fix page table dump to show PAT bit
  x86/asm: Add pud_pgprot() and pmd_pgprot()
  x86/asm: Fix pud/pmd interfaces to handle large PAT bit
  x86/asm: Add pud/pmd mask interfaces to handle large PAT bit
  x86/asm: Move PUD_PAGE macros to page_types.h
  x86/vdso32: Define PGTABLE_LEVELS to 32bit VDSO

arch/x86/mm/init_64.c
arch/x86/mm/pageattr.c

diff --combined arch/x86/mm/init_64.c
index df48430c279b8688996b9f0074c08b1ce139af06,f8b1573667006ce5dccda5c1da973e60e6087789..5ed62eff31bd5e7fe2e711a4204afbaad60e2437
@@@ -1132,7 -1132,7 +1132,7 @@@ void mark_rodata_ro(void)
         * has been zapped already via cleanup_highmem().
         */
        all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
 -      set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
 +      set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
  
        rodata_test();
  
        free_init_pages("unused kernel",
                        (unsigned long) __va(__pa_symbol(rodata_end)),
                        (unsigned long) __va(__pa_symbol(_sdata)));
+       debug_checkwx();
  }
  
  #endif
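
A note on the set_memory_nx() change in the hunk above: the NX range now
starts at text_end rather than rodata_start, so the alignment gap between
the end of the kernel text/exception table and the start of .rodata is no
longer left executable. Roughly (a simplified, hedged sketch of the layout;
exact boundaries are build-dependent):

    /*
     * Layout (simplified):  _text ... text_end | gap | rodata_start ... all_end
     *
     * Before: set_memory_nx(rodata_start, ...) left [text_end, rodata_start)
     *         executable even though it contains no code.
     * After:  set_memory_nx(text_end, ...) marks the gap NX as well.
     */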
diff --combined arch/x86/mm/pageattr.c
index 050a092b8d9a2628c03227d06c4d8a8314d5433a,e2621a8e8213c69cb170097d3a41d2a10f32f1fd..a3137a4feed15ed0d58189411e4e1ae2d9236e8a
@@@ -414,18 -414,28 +414,28 @@@ pmd_t *lookup_pmd_address(unsigned long address)
  phys_addr_t slow_virt_to_phys(void *__virt_addr)
  {
        unsigned long virt_addr = (unsigned long)__virt_addr;
-       phys_addr_t phys_addr;
-       unsigned long offset;
+       unsigned long phys_addr, offset;
        enum pg_level level;
-       unsigned long pmask;
        pte_t *pte;
  
        pte = lookup_address(virt_addr, &level);
        BUG_ON(!pte);
-       pmask = page_level_mask(level);
-       offset = virt_addr & ~pmask;
-       phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
-       return (phys_addr | offset);
+       switch (level) {
+       case PG_LEVEL_1G:
+               phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+               offset = virt_addr & ~PUD_PAGE_MASK;
+               break;
+       case PG_LEVEL_2M:
+               phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+               offset = virt_addr & ~PMD_PAGE_MASK;
+               break;
+       default:
+               phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+               offset = virt_addr & ~PAGE_MASK;
+       }
+       return (phys_addr_t)(phys_addr | offset);
  }
  EXPORT_SYMBOL_GPL(slow_virt_to_phys);
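
The slow_virt_to_phys() rewrite above exists because, in a 2M or 1G entry,
the PAT bit occupies bit 12, exactly where the low PFN bits of a 4K PTE sit;
decoding a large entry with the 4K helpers folds the PAT bit into the
returned physical address. A small self-contained demonstration of the
aliasing (hypothetical userspace C with the relevant masks spelled out by
hand, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT      12
    #define PMD_SHIFT       21
    #define PAGE_MASK       (~((1ULL << PAGE_SHIFT) - 1))
    #define PMD_PAGE_MASK   (~((1ULL << PMD_SHIFT) - 1))
    #define _PAGE_PAT_LARGE (1ULL << 12)    /* PAT bit position in 2M/1G entries */

    int main(void)
    {
            /* hypothetical 2M PMD: frame at 1 GiB, PAT bit plus low flag bits */
            uint64_t pmd = 0x40000000ULL | _PAGE_PAT_LARGE | 0x63;

            /* naive 4K-style decode: the PAT bit leaks into the pfn (0x40001) */
            printf("4K-style pfn: %llx\n",
                   (unsigned long long)((pmd & PAGE_MASK) >> PAGE_SHIFT));

            /* 2M-aware decode, conceptually what pmd_pfn() does (0x40000) */
            printf("2M-aware pfn: %llx\n",
                   (unsigned long long)((pmd & PMD_PAGE_MASK) >> PAGE_SHIFT));
            return 0;
    }

Hence the switch on level above: pud_pfn()/pmd_pfn() mask with the
large-page mask before shifting, while pte_pfn() remains correct for
4K entries.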
  
@@@ -458,7 -468,7 +468,7 @@@ static int
  try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
  {
-       unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
+       unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn, old_pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot, req_prot;
        int i, do_split = 1;
  
        switch (level) {
        case PG_LEVEL_2M:
- #ifdef CONFIG_X86_64
+               old_prot = pmd_pgprot(*(pmd_t *)kpte);
+               old_pfn = pmd_pfn(*(pmd_t *)kpte);
+               break;
        case PG_LEVEL_1G:
- #endif
-               psize = page_level_size(level);
-               pmask = page_level_mask(level);
+               old_prot = pud_pgprot(*(pud_t *)kpte);
+               old_pfn = pud_pfn(*(pud_t *)kpte);
                break;
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }
  
+       psize = page_level_size(level);
+       pmask = page_level_mask(level);
        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         * up accordingly.
         */
        old_pte = *kpte;
-       old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
+       req_prot = pgprot_large_2_4k(old_prot);
  
        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
        req_prot = canon_pgprot(req_prot);
  
        /*
-        * old_pte points to the large page base address. So we need
+        * old_pfn points to the large page base pfn. So we need
         * to add the offset of the virtual address:
         */
-       pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
+       pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;
  
        new_prot = static_protections(req_prot, address, pfn);
         * the pages in the range we try to preserve:
         */
        addr = address & pmask;
-       pfn = pte_pfn(old_pte);
+       pfn = old_pfn;
        for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
  
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
-               new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
+               new_pte = pfn_pte(old_pfn, new_prot);
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
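
To make the pfn arithmetic in this hunk concrete, a worked example with
hypothetical numbers:

    /*
     * Assume a 2M mapping (psize = 0x200000) with old_pfn = 0x40000 and
     * a request at 0x123000 bytes into the large page:
     *
     *   address & (psize - 1)            = 0x123000
     *   pfn = old_pfn + (0x123000 >> PAGE_SHIFT)
     *       = 0x40000 + 0x123 = 0x40123
     *
     * i.e. the 4K pfn backing 'address' inside the large page -- taken
     * from old_pfn now, so the large PAT bit cannot corrupt it.
     */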
@@@ -591,7 -605,7 +605,7 @@@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address
                   struct page *base)
  {
        pte_t *pbase = (pte_t *)page_address(base);
-       unsigned long pfn, pfninc = 1;
+       unsigned long ref_pfn, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *tmp;
        pgprot_t ref_prot;
        }
  
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
-       ref_prot = pte_pgprot(pte_clrhuge(*kpte));
  
-       /* promote PAT bit to correct position */
-       if (level == PG_LEVEL_2M)
+       switch (level) {
+       case PG_LEVEL_2M:
+               ref_prot = pmd_pgprot(*(pmd_t *)kpte);
+               /* clear PSE and promote PAT bit to correct position */
                ref_prot = pgprot_large_2_4k(ref_prot);
+               ref_pfn = pmd_pfn(*(pmd_t *)kpte);
+               break;
  
- #ifdef CONFIG_X86_64
-       if (level == PG_LEVEL_1G) {
+       case PG_LEVEL_1G:
+               ref_prot = pud_pgprot(*(pud_t *)kpte);
+               ref_pfn = pud_pfn(*(pud_t *)kpte);
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                /*
-                * Set the PSE flags only if the PRESENT flag is set
+                * Clear the PSE flags if the PRESENT flag is not set
                 * otherwise pmd_present/pmd_huge will return true
                 * even on a non present pmd.
                 */
-               if (pgprot_val(ref_prot) & _PAGE_PRESENT)
-                       pgprot_val(ref_prot) |= _PAGE_PSE;
-               else
+               if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
                        pgprot_val(ref_prot) &= ~_PAGE_PSE;
+               break;
+       default:
+               spin_unlock(&pgd_lock);
+               return 1;
        }
- #endif
  
        /*
         * Set the GLOBAL flags only if the PRESENT flag is set
        /*
         * Get the target pfn from the original entry:
         */
-       pfn = pte_pfn(*kpte);
+       pfn = ref_pfn;
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
  
 -      if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
 -                              PFN_DOWN(__pa(address)) + 1))
 -              split_page_count(level);
 +      if (virt_addr_valid(address)) {
 +              unsigned long pfn = PFN_DOWN(__pa(address));
 +
 +              if (pfn_range_is_mapped(pfn, pfn + 1))
 +                      split_page_count(level);
 +      }
  
        /*
         * Install the new, split up pagetable.
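
For the split itself, the population loop above fills all PTRS_PER_PTE (512)
entries of the new lower-level table starting at ref_pfn, with the step size
taken from pfninc (a hedged summary of the arithmetic in this hunk):

    /*
     * Splitting on x86-64 with 4K base pages:
     *
     *   2M -> 512 x 4K PTEs:  pfninc = 1
     *   1G -> 512 x 2M PMDs:  pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT = 512
     *
     * so entry i of the new table maps ref_pfn + i * pfninc, with
     * ref_prot taken from the matching pmd/pud helper so the large
     * PAT bit is translated rather than misread as part of the pfn.
     */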