git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
thp: move khugepaged_mutex out of khugepaged
authorXiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Thu, 13 Sep 2012 00:58:38 +0000 (10:58 +1000)
committerStephen Rothwell <sfr@canb.auug.org.au>
Thu, 13 Sep 2012 07:27:50 +0000 (17:27 +1000)
Currently, khugepaged_mutex is used in a complex way that is hard to
understand; in fact, it is only used to serialize start_khugepaged and
khugepaged, for these reasons:

- khugepaged_thread is shared between them
- the thp disable path (echo never > transparent_hugepage/enabled) is
  nonblocking, so we need to protect khugepaged_thread to get a stable
  running state

These can be avoided by:

- using the lock to serialize thread creation and cancellation
- the thp disable path cannot finish until the thread exits

Then khugepaged_thread is fully controlled by start_khugepaged, and
khugepaged is happy without the lock.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 69c968cab3c987d7c507a1462f665de7e571f0e1..4d0b2d7fe79d6cbe579d774fe16307d2d0fbecdd 100644 (file)
@@ -139,9 +139,6 @@ static int start_khugepaged(void)
 {
        int err = 0;
        if (khugepaged_enabled()) {
-               int wakeup;
-
-               mutex_lock(&khugepaged_mutex);
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
@@ -151,15 +148,17 @@ static int start_khugepaged(void)
                        err = PTR_ERR(khugepaged_thread);
                        khugepaged_thread = NULL;
                }
-               wakeup = !list_empty(&khugepaged_scan.mm_head);
-               mutex_unlock(&khugepaged_mutex);
-               if (wakeup)
+
+               if (!list_empty(&khugepaged_scan.mm_head))
                        wake_up_interruptible(&khugepaged_wait);
 
                set_recommended_min_free_kbytes();
-       } else
+       } else if (khugepaged_thread) {
                /* wakeup to exit */
                wake_up_interruptible(&khugepaged_wait);
+               kthread_stop(khugepaged_thread);
+               khugepaged_thread = NULL;
+       }
 
        return err;
 }
@@ -221,7 +220,12 @@ static ssize_t enabled_store(struct kobject *kobj,
                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 
        if (ret > 0) {
-               int err = start_khugepaged();
+               int err;
+
+               mutex_lock(&khugepaged_mutex);
+               err = start_khugepaged();
+               mutex_unlock(&khugepaged_mutex);
+
                if (err)
                        ret = err;
        }
@@ -2330,20 +2334,10 @@ static int khugepaged(void *none)
        set_freezable();
        set_user_nice(current, 19);
 
-       /* serialize with start_khugepaged() */
-       mutex_lock(&khugepaged_mutex);
-
-       for (;;) {
-               mutex_unlock(&khugepaged_mutex);
+       while (!kthread_should_stop()) {
                VM_BUG_ON(khugepaged_thread != current);
                khugepaged_loop();
                VM_BUG_ON(khugepaged_thread != current);
-
-               mutex_lock(&khugepaged_mutex);
-               if (!khugepaged_enabled())
-                       break;
-               if (unlikely(kthread_should_stop()))
-                       break;
        }
 
        spin_lock(&khugepaged_mm_lock);
@@ -2352,10 +2346,6 @@ static int khugepaged(void *none)
        if (mm_slot)
                collect_mm_slot(mm_slot);
        spin_unlock(&khugepaged_mm_lock);
-
-       khugepaged_thread = NULL;
-       mutex_unlock(&khugepaged_mutex);
-
        return 0;
 }