thp, memcg: split hugepage for memcg oom on cow
Author:     David Rientjes <rientjes@google.com>
AuthorDate: Tue, 29 May 2012 22:06:23 +0000 (15:06 -0700)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 17 Jan 2013 16:43:58 +0000 (08:43 -0800)
commit 1f1d06c34f7675026326cd9f39ff91e4555cf355 upstream.

On COW, a new hugepage is allocated and charged to the memcg.  If the
system is oom or the charge to the memcg fails, however, the fault
handler will return VM_FAULT_OOM, which results in an oom kill.

Instead, it's possible to fall back to splitting the hugepage so that the
COW results only in an order-0 page being allocated and charged to the
memcg, which has a higher likelihood of succeeding.  This is expensive
because the hugepage must be split in the page fault handler, but it is
much better than unnecessarily oom killing a process.
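
A condensed sketch of how the two hunks below cooperate (not a standalone
translation unit; all names and signatures are taken from the hunks
themselves):

	/* mm/huge_memory.c, do_huge_pmd_wp_page(): if the copied hugepage
	 * cannot be charged to the memcg, split the hugepage so that the
	 * retried fault only needs an order-0 page. */
	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		put_page(new_page);
		split_huge_page(page);
		put_page(page);
		ret |= VM_FAULT_OOM;
		goto out;
	}

	/* mm/memory.c, handle_mm_fault(): by the time VM_FAULT_OOM is seen
	 * here the huge pmd has already been split, so retrying the fault
	 * takes the pte path and charges a single order-0 page instead of
	 * triggering an oom kill. */
	if (unlikely(ret & VM_FAULT_OOM))
		goto retry;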

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/huge_memory.c
mm/memory.c

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8cc11dda6a747e4e9e823c8f988fd441a4b43ad9..a9ab45ec7d5ecb3beb1c1de77aa50545fe8817fa 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -920,6 +920,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(THP_FAULT_FALLBACK);
                ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                                   pmd, orig_pmd, page, haddr);
+               if (ret & VM_FAULT_OOM)
+                       split_huge_page(page);
                put_page(page);
                goto out;
        }
@@ -927,6 +929,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
+               split_huge_page(page);
                put_page(page);
                ret |= VM_FAULT_OOM;
                goto out;
diff --git a/mm/memory.c b/mm/memory.c
index 7292acb92f9bb6836e0cfa94873b93692ca2ec92..4da0f8ad142722cadf568d76cd736661bcd7b37c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3470,6 +3470,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
+retry:
        pgd = pgd_offset(mm, address);
        pud = pud_alloc(mm, pgd, address);
        if (!pud)
@@ -3483,13 +3484,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                          pmd, flags);
        } else {
                pmd_t orig_pmd = *pmd;
+               int ret;
+
                barrier();
                if (pmd_trans_huge(orig_pmd)) {
                        if (flags & FAULT_FLAG_WRITE &&
                            !pmd_write(orig_pmd) &&
-                           !pmd_trans_splitting(orig_pmd))
-                               return do_huge_pmd_wp_page(mm, vma, address,
-                                                          pmd, orig_pmd);
+                           !pmd_trans_splitting(orig_pmd)) {
+                               ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+                                                         orig_pmd);
+                               /*
+                                * If COW results in an oom, the huge pmd will
+                                * have been split, so retry the fault on the
+                                * pte for a smaller charge.
+                                */
+                               if (unlikely(ret & VM_FAULT_OOM))
+                                       goto retry;
+                               return ret;
+                       }
                        return 0;
                }
        }