thp: merge page pre-alloc in khugepaged_loop into khugepaged_do_scan
author Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Fri, 28 Sep 2012 00:19:17 +0000 (10:19 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 8 Oct 2012 02:59:58 +0000 (13:59 +1100)
There are two pre-alloc operations in these two functions; the difference is:
- in khugepaged_loop, it is allowed to sleep if the page allocation fails
- in khugepaged_do_scan, it exits immediately if the page allocation fails

Actually, in khugepaged_do_scan we can allow the pre-alloc to sleep on the
first failure, and then the operation in khugepaged_loop can be removed.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
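
The merged policy (sleep once after the first failed pre-allocation, then give
up for the remainder of the scan) can be illustrated with a minimal user-space
sketch. alloc_hugepage_stub(), the 2 MB size and ALLOC_SLEEP_SECS below are
stand-ins for illustration only, not the kernel interfaces touched by this
patch:

/*
 * Minimal user-space sketch of the merged pre-alloc policy: retry the
 * allocation after sleeping once on the first failure, give up on any
 * later failure during the same scan.  alloc_hugepage_stub() and
 * ALLOC_SLEEP_SECS are stand-ins, not kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define ALLOC_SLEEP_SECS 1	/* stand-in for khugepaged_alloc_sleep_millisecs */

static void *alloc_hugepage_stub(void)
{
	static int attempts;

	/* Fail the first attempt so the sleep-and-retry path is exercised. */
	return attempts++ ? malloc(2 * 1024 * 1024) : NULL;
}

static void *prealloc_hugepage(bool *wait)
{
	void *hpage;

	do {
		hpage = alloc_hugepage_stub();
		if (!hpage) {
			if (!*wait)
				return NULL;	/* already slept once: give up */

			*wait = false;		/* sleep at most once per scan */
			sleep(ALLOC_SLEEP_SECS);
		}
	} while (!hpage);

	return hpage;
}

int main(void)
{
	bool wait = true;
	void *hpage = prealloc_hugepage(&wait);

	printf("pre-alloc %s\n", hpage ? "succeeded" : "failed");
	free(hpage);
	return 0;
}

The diff below wires the same "wait" flag into both the !CONFIG_NUMA helper
and the CONFIG_NUMA error path of khugepaged_do_scan(), so each scan sleeps
at most once on a failed allocation.
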
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cfe8694c33ad7e869e813c8065bfaf4bb39b5b1b..e414b1d996a4201e3aff73b6ffa948f544727bf5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2288,26 +2288,57 @@ static int khugepaged_wait_event(void)
                kthread_should_stop();
 }
 
-static void khugepaged_do_scan(struct page **hpage)
+static void khugepaged_alloc_sleep(void)
+{
+       wait_event_freezable_timeout(khugepaged_wait, false,
+                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
+
+#ifndef CONFIG_NUMA
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+       struct page *hpage;
+
+       do {
+               hpage = alloc_hugepage(khugepaged_defrag());
+               if (!hpage) {
+                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+                       if (!*wait)
+                               return NULL;
+
+                       *wait = false;
+                       khugepaged_alloc_sleep();
+               } else
+                       count_vm_event(THP_COLLAPSE_ALLOC);
+       } while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+       return hpage;
+}
+#endif
+
+static void khugepaged_do_scan(void)
 {
+       struct page *hpage = NULL;
        unsigned int progress = 0, pass_through_head = 0;
        unsigned int pages = ACCESS_ONCE(khugepaged_pages_to_scan);
+       bool wait = true;
 
        while (progress < pages) {
                cond_resched();
 
 #ifndef CONFIG_NUMA
-               if (!*hpage) {
-                       *hpage = alloc_hugepage(khugepaged_defrag());
-                       if (unlikely(!*hpage)) {
-                               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+               if (!hpage)
+                       hpage = khugepaged_alloc_hugepage(&wait);
+
+               if (unlikely(!hpage))
+                       break;
+#else
+               if (IS_ERR(hpage)) {
+                       if (!wait)
                                break;
-                       }
-                       count_vm_event(THP_COLLAPSE_ALLOC);
+                       wait = false;
+                       khugepaged_alloc_sleep();
                }
-#else
-               if (IS_ERR(*hpage))
-                       break;
 #endif
 
                if (unlikely(kthread_should_stop() || freezing(current)))
@@ -2319,37 +2350,16 @@ static void khugepaged_do_scan(struct page **hpage)
                if (khugepaged_has_work() &&
                    pass_through_head < 2)
                        progress += khugepaged_scan_mm_slot(pages - progress,
-                                                           hpage);
+                                                           &hpage);
                else
                        progress = pages;
                spin_unlock(&khugepaged_mm_lock);
        }
-}
 
-static void khugepaged_alloc_sleep(void)
-{
-       wait_event_freezable_timeout(khugepaged_wait, false,
-                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+       if (!IS_ERR_OR_NULL(hpage))
+               put_page(hpage);
 }
 
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(void)
-{
-       struct page *hpage;
-
-       do {
-               hpage = alloc_hugepage(khugepaged_defrag());
-               if (!hpage) {
-                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-                       khugepaged_alloc_sleep();
-               } else
-                       count_vm_event(THP_COLLAPSE_ALLOC);
-       } while (unlikely(!hpage) &&
-                likely(khugepaged_enabled()));
-       return hpage;
-}
-#endif
-
 static void khugepaged_wait_work(void)
 {
        try_to_freeze();
@@ -2370,25 +2380,8 @@ static void khugepaged_wait_work(void)
 
 static void khugepaged_loop(void)
 {
-       struct page *hpage = NULL;
-
        while (likely(khugepaged_enabled())) {
-#ifndef CONFIG_NUMA
-               hpage = khugepaged_alloc_hugepage();
-               if (unlikely(!hpage))
-                       break;
-#else
-               if (IS_ERR(hpage)) {
-                       khugepaged_alloc_sleep();
-                       hpage = NULL;
-               }
-#endif
-
-               khugepaged_do_scan(&hpage);
-
-               if (!IS_ERR_OR_NULL(hpage))
-                       put_page(hpage);
-
+               khugepaged_do_scan();
                khugepaged_wait_work();
        }
 }