ksm: keep quiet while list empty
author	Hugh Dickins <hugh.dickins@tiscali.co.uk>
	Tue, 22 Sep 2009 00:02:14 +0000 (17:02 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Tue, 22 Sep 2009 14:17:32 +0000 (07:17 -0700)
ksm_scan_thread already sleeps in wait_event_interruptible until setting
ksm_run activates it; but if there is nothing on its list to look at, i.e.
nobody has yet said madvise MADV_MERGEABLE, it is a shame to keep clocking
up system time and full_scans: add ksmd_should_run to check that too.
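
For reference, the userspace step that first puts an mm on ksmd's list is an
ordinary madvise(2) call; a minimal sketch, not part of this patch (buf, len
and the bare main() are only illustrative; MADV_MERGEABLE is the real flag):

	#include <stddef.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;	/* illustrative size */

		/* Anonymous mapping marked mergeable: the first MADV_MERGEABLE
		 * in a process goes through __ksm_enter(), putting its mm on
		 * ksm_mm_head.mm_list and (after this patch) waking ksmd. */
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf != MAP_FAILED)
			madvise(buf, len, MADV_MERGEABLE);
		return 0;
	}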

And move the mutex_lock out around it: the new counts showed that when
ksm_run was stopped, a little work often still got done afterwards, because
ksm_run had been read before taking the mutex.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/ksm.c

index 81f692e836db4cae8fbb7b3ce5214467ef033fa7..2849422448a3f5314bb05f9430c42358f2884b06 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1280,21 +1280,27 @@ static void ksm_do_scan(unsigned int scan_npages)
        }
 }
 
+static int ksmd_should_run(void)
+{
+       return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
+}
+
 static int ksm_scan_thread(void *nothing)
 {
        set_user_nice(current, 5);
 
        while (!kthread_should_stop()) {
-               if (ksm_run & KSM_RUN_MERGE) {
-                       mutex_lock(&ksm_thread_mutex);
+               mutex_lock(&ksm_thread_mutex);
+               if (ksmd_should_run())
                        ksm_do_scan(ksm_thread_pages_to_scan);
-                       mutex_unlock(&ksm_thread_mutex);
+               mutex_unlock(&ksm_thread_mutex);
+
+               if (ksmd_should_run()) {
                        schedule_timeout_interruptible(
                                msecs_to_jiffies(ksm_thread_sleep_millisecs));
                } else {
                        wait_event_interruptible(ksm_thread_wait,
-                                       (ksm_run & KSM_RUN_MERGE) ||
-                                       kthread_should_stop());
+                               ksmd_should_run() || kthread_should_stop());
                }
        }
        return 0;
@@ -1339,10 +1345,16 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 
 int __ksm_enter(struct mm_struct *mm)
 {
-       struct mm_slot *mm_slot = alloc_mm_slot();
+       struct mm_slot *mm_slot;
+       int needs_wakeup;
+
+       mm_slot = alloc_mm_slot();
        if (!mm_slot)
                return -ENOMEM;
 
+       /* Check ksm_run too?  Would need tighter locking */
+       needs_wakeup = list_empty(&ksm_mm_head.mm_list);
+
        spin_lock(&ksm_mmlist_lock);
        insert_to_mm_slots_hash(mm, mm_slot);
        /*
@@ -1354,6 +1366,10 @@ int __ksm_enter(struct mm_struct *mm)
        spin_unlock(&ksm_mmlist_lock);
 
        set_bit(MMF_VM_MERGEABLE, &mm->flags);
+
+       if (needs_wakeup)
+               wake_up_interruptible(&ksm_thread_wait);
+
        return 0;
 }