diff --git a/mm/memory.c b/mm/memory.c
index bb04d8f2f86c415c24a52dadd71623a2316454d1..9cb27470fee991cb874676bb0cbc0f694b5e1d36 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2426,8 +2426,6 @@ void unmap_mapping_range(struct address_space *mapping,
        if (details.last_index < details.first_index)
                details.last_index = ULONG_MAX;
 
-
-       /* DAX uses i_mmap_lock to serialise file truncate vs page fault */
        i_mmap_lock_write(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
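
The hunk above drops a stale comment but keeps the exclusive lock: unmap_mapping_range() still takes the i_mmap rwsem for write, and fault paths that have no struct page to lock (the DAX case) take the same rwsem, so truncate and page fault cannot interleave. Below is a minimal userspace model of that serialisation; a POSIX rwlock stands in for the i_mmap rwsem, and the function names are hypothetical, not kernel API.

/*
 * Minimal userspace model of truncate-vs-fault serialisation.  A POSIX
 * rwlock stands in for the i_mmap rwsem; function names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t i_mmap_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static void *truncate_path(void *arg)
{
	(void)arg;
	/* unmap_mapping_range(): exclusive, like i_mmap_lock_write() */
	pthread_rwlock_wrlock(&i_mmap_rwsem);
	puts("truncate: unmapping under the exclusive lock");
	pthread_rwlock_unlock(&i_mmap_rwsem);
	return NULL;
}

static void *fault_path(void *arg)
{
	(void)arg;
	/* a fault with no page to lock takes the same rwsem */
	pthread_rwlock_wrlock(&i_mmap_rwsem);
	puts("fault: installing a mapping while truncate is excluded");
	pthread_rwlock_unlock(&i_mmap_rwsem);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, truncate_path, NULL);
	pthread_create(&b, NULL, fault_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Build with -lpthread; whichever thread acquires the rwlock first fully excludes the other, which is the property the kernel code relies on.
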
@@ -3015,9 +3013,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                } else {
                        /*
                         * The fault handler has no page to lock, so it holds
-                        * i_mmap_lock for read to protect against truncate.
+                        * i_mmap_lock for write to protect against truncate.
                         */
-                       i_mmap_unlock_read(vma->vm_file->f_mapping);
+                       i_mmap_unlock_write(vma->vm_file->f_mapping);
                }
                goto uncharge_out;
        }
@@ -3031,9 +3029,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        } else {
                /*
                 * The fault handler has no page to lock, so it holds
-                * i_mmap_lock for read to protect against truncate.
+                * i_mmap_lock for write to protect against truncate.
                 */
-               i_mmap_unlock_read(vma->vm_file->f_mapping);
+               i_mmap_unlock_write(vma->vm_file->f_mapping);
        }
        return ret;
 uncharge_out:
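
Both do_cow_fault() unlock sites change in lockstep with the lock side: the fault handler now takes the i_mmap rwsem for write, so the error path (uncharge_out) and the completion path must release it for write as well. Unlike a plain mutex, an rwsem release must match the acquisition mode. A toy single-threaded model, with hypothetical struct and helper names and assertions standing in for rwsem state corruption:

/* Toy rwsem (hypothetical) showing why release must match acquisition:
 * up_read() after down_write() would trip an assertion here, the
 * single-threaded analogue of corrupting real rwsem state. */
#include <assert.h>
#include <stdio.h>

struct toy_rwsem { int readers; int writer; };

static void down_read(struct toy_rwsem *s)  { assert(!s->writer); s->readers++; }
static void up_read(struct toy_rwsem *s)    { assert(s->readers > 0); s->readers--; }
static void down_write(struct toy_rwsem *s) { assert(!s->writer && !s->readers); s->writer = 1; }
static void up_write(struct toy_rwsem *s)   { assert(s->writer); s->writer = 0; }

int main(void)
{
	struct toy_rwsem sem = { 0, 0 };

	/* new scheme: taken for write in the fault handler, so every
	 * exit path from do_cow_fault() must release for write too */
	down_write(&sem);
	up_write(&sem);

	/* old pairing, shown for contrast */
	down_read(&sem);
	up_read(&sem);

	printf("balanced: readers=%d writer=%d\n", sem.readers, sem.writer);
	return 0;
}
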
@@ -3232,6 +3230,27 @@ out:
        return 0;
 }
 
+static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+                       unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+       if (vma_is_anonymous(vma))
+               return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
+       if (vma->vm_ops->pmd_fault)
+               return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+       return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+                       unsigned long address, pmd_t *pmd, pmd_t orig_pmd,
+                       unsigned int flags)
+{
+       if (vma_is_anonymous(vma))
+               return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
+       if (vma->vm_ops->pmd_fault)
+               return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+       return VM_FAULT_FALLBACK;
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
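
The two helpers added above are what lets file-backed mappings take huge page faults: anonymous VMAs keep the existing transparent-huge-page routines, a VMA whose vm_ops supplies the new ->pmd_fault handler gets that called, and everything else returns VM_FAULT_FALLBACK so the fault is retried with normal PTEs. vma->vm_ops can be dereferenced without a NULL check because in this tree vma_is_anonymous() is simply !vma->vm_ops. A userspace sketch of the dispatch; struct layout and names are stand-ins, not the kernel's definitions:

/* Userspace sketch of the new dispatch; types are stand-ins. */
#include <stdio.h>

#define VM_FAULT_FALLBACK 0x0800	/* retry the fault with PTEs */

struct vm_area {
	int anonymous;			/* models vma_is_anonymous() */
	int (*pmd_fault)(struct vm_area *); /* models vm_ops->pmd_fault */
};

static int huge_anon_fault(struct vm_area *vma)
{
	(void)vma;
	return 0;			/* do_huge_pmd_anonymous_page() analogue */
}

static int create_huge_pmd(struct vm_area *vma)
{
	if (vma->anonymous)
		return huge_anon_fault(vma);
	if (vma->pmd_fault)		/* file-backed with huge support */
		return vma->pmd_fault(vma);
	return VM_FAULT_FALLBACK;	/* no huge path: use PTEs */
}

static int dax_pmd_fault(struct vm_area *vma)
{
	(void)vma;
	return 0;
}

int main(void)
{
	struct vm_area anon = { 1, NULL };
	struct vm_area dax  = { 0, dax_pmd_fault };
	struct vm_area misc = { 0, NULL };

	printf("%#x %#x %#x\n", create_huge_pmd(&anon),
	       create_huge_pmd(&dax), create_huge_pmd(&misc));
	return 0;
}
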
@@ -3267,12 +3286,12 @@ static int handle_pte_fault(struct mm_struct *mm,
        barrier();
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
-                       if (vma->vm_ops)
+                       if (vma_is_anonymous(vma))
+                               return do_anonymous_page(mm, vma, address,
+                                                        pte, pmd, flags);
+                       else
                                return do_fault(mm, vma, address, pte, pmd,
                                                flags, entry);
-
-                       return do_anonymous_page(mm, vma, address, pte, pmd,
-                                       flags);
                }
                return do_swap_page(mm, vma, address,
                                        pte, pmd, flags, entry);
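
The rewritten branch in handle_pte_fault() is behaviourally equivalent to the old !vma->vm_ops test but states the intent directly: anonymous VMAs get do_anonymous_page(), everything else goes through do_fault(). A self-contained model of the predicate, with stand-in types rather than the kernel definitions:

/* Model of the predicate swap; types are stand-ins, not kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vm_ops { int unused; };
struct vm_area { const struct vm_ops *vm_ops; };

/* in this tree, vma_is_anonymous() reduces to the old !vma->vm_ops test */
static bool vma_is_anonymous(const struct vm_area *vma)
{
	return vma->vm_ops == NULL;
}

int main(void)
{
	static const struct vm_ops file_ops = { 0 };
	struct vm_area anon = { NULL };
	struct vm_area file = { &file_ops };

	printf("anon=%d file=%d\n", vma_is_anonymous(&anon),
	       vma_is_anonymous(&file));
	return 0;
}
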
@@ -3334,10 +3353,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pmd)
                return VM_FAULT_OOM;
        if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
-               int ret = VM_FAULT_FALLBACK;
-               if (!vma->vm_ops)
-                       ret = do_huge_pmd_anonymous_page(mm, vma, address,
-                                       pmd, flags);
+               int ret = create_huge_pmd(mm, vma, address, pmd, flags);
                if (!(ret & VM_FAULT_FALLBACK))
                        return ret;
        } else {
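
At this call site the open-coded anonymous-only special case collapses into create_huge_pmd(). VM_FAULT_FALLBACK is a request to retry the fault at PTE granularity rather than an error, hence the early return only when the bit is clear. A toy harness for that contract; names are hypothetical, and 0x0800 matches VM_FAULT_FALLBACK in kernels of this era (an assumption worth checking against include/linux/mm.h):

/* Toy harness for the fallback contract (hypothetical names). */
#include <stdio.h>

#define VM_FAULT_FALLBACK 0x0800

static int try_huge(int has_huge_path)
{
	return has_huge_path ? 0 : VM_FAULT_FALLBACK;
}

static const char *handle_fault(int has_huge_path)
{
	int ret = try_huge(has_huge_path);

	if (!(ret & VM_FAULT_FALLBACK))
		return "huge";		/* resolved at PMD granularity */
	return "pte";			/* retried at PTE granularity */
}

int main(void)
{
	printf("%s %s\n", handle_fault(1), handle_fault(0));
	return 0;
}
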
@@ -3361,8 +3377,8 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                             orig_pmd, pmd);
 
                        if (dirty && !pmd_write(orig_pmd)) {
-                               ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
-                                                         orig_pmd);
+                               ret = wp_huge_pmd(mm, vma, address, pmd,
+                                                       orig_pmd, flags);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
                        } else {
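
The same pattern covers write-protect faults: wp_huge_pmd() keeps do_huge_pmd_wp_page() for anonymous memory and otherwise re-invokes ->pmd_fault with the original fault flags, so a file-backed handler can see FAULT_FLAG_WRITE and either service the write or request PTE fallback. A sketch under the same stand-in types as above:

/* Write-protect dispatch under stand-in types; FAULT_FLAG_WRITE here
 * mirrors the kernel flag so ->pmd_fault can see it is a write. */
#include <stdio.h>

#define FAULT_FLAG_WRITE  0x01
#define VM_FAULT_FALLBACK 0x0800

struct vm_area {
	int anonymous;
	int (*pmd_fault)(struct vm_area *, unsigned int);
};

static int huge_cow(struct vm_area *vma)
{
	(void)vma;
	return 0;			/* do_huge_pmd_wp_page() analogue */
}

static int wp_huge_pmd(struct vm_area *vma, unsigned int flags)
{
	if (vma->anonymous)
		return huge_cow(vma);
	if (vma->pmd_fault)
		return vma->pmd_fault(vma, flags);
	return VM_FAULT_FALLBACK;
}

static int dax_pmd_fault(struct vm_area *vma, unsigned int flags)
{
	(void)vma;
	/* a handler might service the write, or ask for PTE fallback */
	return (flags & FAULT_FLAG_WRITE) ? 0 : VM_FAULT_FALLBACK;
}

int main(void)
{
	struct vm_area dax = { 0, dax_pmd_fault };

	printf("wp write fault: %#x\n", wp_huge_pmd(&dax, FAULT_FLAG_WRITE));
	return 0;
}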