shrinker: add node awareness
author     Dave Chinner <dchinner@redhat.com>
           Wed, 28 Aug 2013 00:18:03 +0000 (10:18 +1000)
committer  Al Viro <viro@zeniv.linux.org.uk>
           Tue, 10 Sep 2013 22:56:31 +0000 (18:56 -0400)
Pass the node of the current zone being reclaimed to shrink_slab(),
allowing the shrinker control nodemask to be set appropriately for
node-aware shrinkers.
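
As an illustration (not part of this commit), a node-aware shrinker
callback could walk the new mask with for_each_node_mask() and restrict
itself to the nodes the caller asked about; count_objects_on_node() here
is a hypothetical per-node counting helper, not a kernel API:

	/*
	 * Minimal sketch of a node-aware shrinker callback.  Only
	 * objects resident on the nodes in sc->nodes_to_scan are
	 * counted; count_objects_on_node() is an assumed helper.
	 */
	static unsigned long example_count_objects(struct shrinker *shrink,
						   struct shrink_control *sc)
	{
		unsigned long count = 0;
		int nid;

		for_each_node_mask(nid, sc->nodes_to_scan)
			count += count_objects_on_node(nid);

		return count;
	}

Callers populate the mask with node_set(), nodes_setall() or
nodes_clear() before invoking shrink_slab(), as the hunks below show.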

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
drivers/staging/android/ashmem.c
fs/drop_caches.c
include/linux/shrinker.h
mm/memory-failure.c
mm/vmscan.c

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 21a3f7250531c6a4e7891843f08f1919a31f890e..65f36d7287149a799c88207fcc1566134b6ea8d7 100644
@@ -692,6 +692,9 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                                .gfp_mask = GFP_KERNEL,
                                .nr_to_scan = 0,
                        };
+
+                       nodes_setall(sc.nodes_to_scan);
+
                        ret = ashmem_shrink(&ashmem_shrinker, &sc);
                        sc.nr_to_scan = ret;
                        ashmem_shrink(&ashmem_shrinker, &sc);
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index c00e055b62820945bef291fa68b145a4d7145667..9fd702f5bfb2886a715e787b6470615ea07021e0 100644
@@ -44,6 +44,7 @@ static void drop_slab(void)
                .gfp_mask = GFP_KERNEL,
        };
 
+       nodes_setall(shrink.nodes_to_scan);
        do {
                nr_objects = shrink_slab(&shrink, 1000, 1000);
        } while (nr_objects > 10);
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 884e76222e1baf66d3a7649a3f07dcf7f54f52fe..76f520c4c394866e22839a3c12261e15280ee382 100644
@@ -16,6 +16,9 @@ struct shrink_control {
 
        /* How many slab objects shrinker() should scan and try to reclaim */
        unsigned long nr_to_scan;
+
+       /* shrink from these nodes */
+       nodemask_t nodes_to_scan;
 };
 
 #define SHRINK_STOP (~0UL)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d84c5e5331bb5199632f46fda6d3ca3fef9dbaf4..baa4e0a45dec980f42329ec745d5c543951ccdd9 100644
@@ -248,10 +248,12 @@ void shake_page(struct page *p, int access)
         */
        if (access) {
                int nr;
+               int nid = page_to_nid(p);
                do {
                        struct shrink_control shrink = {
                                .gfp_mask = GFP_KERNEL,
                        };
+                       node_set(nid, shrink.nodes_to_scan);
 
                        nr = shrink_slab(&shrink, 1000, 1000);
                        if (page_count(p) == 1)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4d4e859b4b9c7e59853eafbeb4348afb4a701a5b..fe0d5c458440c4f4a8f04eb6f6f6537c89bf6fcc 100644
@@ -2374,12 +2374,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 */
                if (global_reclaim(sc)) {
                        unsigned long lru_pages = 0;
+
+                       nodes_clear(shrink->nodes_to_scan);
                        for_each_zone_zonelist(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask)) {
                                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                        continue;
 
                                lru_pages += zone_reclaimable_pages(zone);
+                               node_set(zone_to_nid(zone),
+                                        shrink->nodes_to_scan);
                        }
 
                        shrink_slab(shrink, sc->nr_scanned, lru_pages);
@@ -2836,6 +2840,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
                return true;
 
        shrink_zone(zone, sc);
+       nodes_clear(shrink.nodes_to_scan);
+       node_set(zone_to_nid(zone), shrink.nodes_to_scan);
 
        reclaim_state->reclaimed_slab = 0;
        nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
@@ -3544,10 +3550,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * number of slab pages and shake the slab until it is reduced
                 * by the same nr_pages that we used for reclaiming unmapped
                 * pages.
-                *
-                * Note that shrink_slab will free memory on all zones and may
-                * take a long time.
                 */
+               nodes_clear(shrink.nodes_to_scan);
+               node_set(zone_to_nid(zone), shrink.nodes_to_scan);
                for (;;) {
                        unsigned long lru_pages = zone_reclaimable_pages(zone);