git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
mm: introduce mm_forbids_zeropage function
author: Dominik Dingel <dingel@linux.vnet.ibm.com>
Thu, 23 Oct 2014 10:07:44 +0000 (12:07 +0200)
committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
Mon, 27 Oct 2014 12:27:24 +0000 (13:27 +0100)
Add a new function stub to allow architectures to disable, for
an mm_struct, the backing of non-present, anonymous pages with
read-only empty zero pages.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
include/linux/mm.h
mm/huge_memory.c
mm/memory.c

index 27eb1bfbe7049adbaac4d90b8f2f77f95c9741bc..ab7dadca4ea585cb341f5a0bb29c50d2bdfb6346 100644 (file)
@@ -56,6 +56,17 @@ extern int sysctl_legacy_va_layout;
 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
 #endif
 
+/*
+ * To prevent common memory management code establishing
+ * a zero page mapping on a read fault.
+ * This macro should be defined within <asm/pgtable.h>.
+ * s390 does this to prevent multiplexing of hardware bits
+ * related to the physical page in case of virtualization.
+ */
+#ifndef mm_forbids_zeropage
+#define mm_forbids_zeropage(X) (0)
+#endif
+
 extern unsigned long sysctl_user_reserve_kbytes;
 extern unsigned long sysctl_admin_reserve_kbytes;
 
index 74c78aa8bc2fa68454928b09f34a7b97f3419e05..7e9c15cb93a9dd4a580d2d15a463f1030c941ff5 100644 (file)
@@ -805,7 +805,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                return VM_FAULT_OOM;
        if (unlikely(khugepaged_enter(vma)))
                return VM_FAULT_OOM;
-       if (!(flags & FAULT_FLAG_WRITE) &&
+       if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
                        transparent_hugepage_use_zero_page()) {
                spinlock_t *ptl;
                pgtable_t pgtable;
index 1cc6bfbd872ee17122b6e85859662696f595a363..d722d4f481c96a3cea0e0a1e4c4e21c41edc3db5 100644 (file)
@@ -2640,7 +2640,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                return VM_FAULT_SIGBUS;
 
        /* Use the zero-page for reads */
-       if (!(flags & FAULT_FLAG_WRITE)) {
+       if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
                                                vma->vm_page_prot));
                page_table = pte_offset_map_lock(mm, pmd, address, &ptl);