git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
x86/mm: Fix __split_large_page() to handle large PAT bit
Author: Toshi Kani <toshi.kani@hpe.com>
Thu, 17 Sep 2015 18:24:23 +0000 (12:24 -0600)
Committer: Thomas Gleixner <tglx@linutronix.de>
Tue, 22 Sep 2015 19:27:33 +0000 (21:27 +0200)
__split_large_page() is called from __change_page_attr() to change
the mapping attribute by splitting a given large page into smaller
pages.  This function uses pte_pfn() and pte_pgprot() for PUD/PMD,
which do not handle the large PAT bit properly.

Fix __split_large_page() by using the corresponding pud/pmd pfn/
pgprot interfaces.

Also remove '#ifdef CONFIG_X86_64', which is not necessary.

Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Konrad Wilk <konrad.wilk@oracle.com>
Cc: Robert Elliot <elliott@hpe.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1442514264-12475-11-git-send-email-toshi.kani@hpe.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/mm/pageattr.c

index 3f612713159a247628ea65f0bdacfa843fdb20a1..ab9465b9160f310d95ef814fd2a7e3328f861e2b 100644 (file)
@@ -605,7 +605,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
                   struct page *base)
 {
        pte_t *pbase = (pte_t *)page_address(base);
-       unsigned long pfn, pfninc = 1;
+       unsigned long ref_pfn, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *tmp;
        pgprot_t ref_prot;
@@ -622,26 +622,33 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
        }
 
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
-       ref_prot = pte_pgprot(pte_clrhuge(*kpte));
 
-       /* promote PAT bit to correct position */
-       if (level == PG_LEVEL_2M)
+       switch (level) {
+       case PG_LEVEL_2M:
+               ref_prot = pmd_pgprot(*(pmd_t *)kpte);
+               /* clear PSE and promote PAT bit to correct position */
                ref_prot = pgprot_large_2_4k(ref_prot);
+               ref_pfn = pmd_pfn(*(pmd_t *)kpte);
+               break;
 
-#ifdef CONFIG_X86_64
-       if (level == PG_LEVEL_1G) {
+       case PG_LEVEL_1G:
+               ref_prot = pud_pgprot(*(pud_t *)kpte);
+               ref_pfn = pud_pfn(*(pud_t *)kpte);
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
+
                /*
-                * Set the PSE flags only if the PRESENT flag is set
+                * Clear the PSE flags if the PRESENT flag is not set
                 * otherwise pmd_present/pmd_huge will return true
                 * even on a non present pmd.
                 */
-               if (pgprot_val(ref_prot) & _PAGE_PRESENT)
-                       pgprot_val(ref_prot) |= _PAGE_PSE;
-               else
+               if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
                        pgprot_val(ref_prot) &= ~_PAGE_PSE;
+               break;
+
+       default:
+               spin_unlock(&pgd_lock);
+               return 1;
        }
-#endif
 
        /*
         * Set the GLOBAL flags only if the PRESENT flag is set
@@ -657,7 +664,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
        /*
         * Get the target pfn from the original entry:
         */
-       pfn = pte_pfn(*kpte);
+       pfn = ref_pfn;
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));