git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
mm: workingset: separate shadow unpacking and refault calculation
author Johannes Weiner <hannes@cmpxchg.org>
Tue, 15 Mar 2016 21:57:10 +0000 (14:57 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Mar 2016 23:55:16 +0000 (16:55 -0700)
Per-cgroup thrash detection will need to derive a live memcg from the
eviction cookie, and doing that inside unpack_shadow() will get nasty
with the reference handling spread over two functions.

In preparation, make unpack_shadow() clearly about extracting static
data, and let workingset_refault() do all the higher-level handling.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/workingset.c

index 3ef92f6e41fe034b56f05d8b6372b9ea099eeccd..f874b2c663e389ad190b052c8f2acf7509f4a643 100644 (file)
@@ -165,13 +165,10 @@ static void *pack_shadow(unsigned long eviction, struct zone *zone)
        return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
 }
 
-static void unpack_shadow(void *shadow,
-                         struct zone **zone,
-                         unsigned long *distance)
+static void unpack_shadow(void *shadow, struct zone **zonep,
+                         unsigned long *evictionp)
 {
        unsigned long entry = (unsigned long)shadow;
-       unsigned long eviction;
-       unsigned long refault;
        int zid, nid;
 
        entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
@@ -179,29 +176,9 @@ static void unpack_shadow(void *shadow,
        entry >>= ZONES_SHIFT;
        nid = entry & ((1UL << NODES_SHIFT) - 1);
        entry >>= NODES_SHIFT;
-       eviction = entry;
-
-       *zone = NODE_DATA(nid)->node_zones + zid;
 
-       refault = atomic_long_read(&(*zone)->inactive_age);
-
-       /*
-        * The unsigned subtraction here gives an accurate distance
-        * across inactive_age overflows in most cases.
-        *
-        * There is a special case: usually, shadow entries have a
-        * short lifetime and are either refaulted or reclaimed along
-        * with the inode before they get too old.  But it is not
-        * impossible for the inactive_age to lap a shadow entry in
-        * the field, which can then can result in a false small
-        * refault distance, leading to a false activation should this
-        * old entry actually refault again.  However, earlier kernels
-        * used to deactivate unconditionally with *every* reclaim
-        * invocation for the longest time, so the occasional
-        * inappropriate activation leading to pressure on the active
-        * list is not a problem.
-        */
-       *distance = (refault - eviction) & EVICTION_MASK;
+       *zonep = NODE_DATA(nid)->node_zones + zid;
+       *evictionp = entry;
 }
 
 /**
@@ -233,9 +210,32 @@ void *workingset_eviction(struct address_space *mapping, struct page *page)
 bool workingset_refault(void *shadow)
 {
        unsigned long refault_distance;
+       unsigned long eviction;
+       unsigned long refault;
        struct zone *zone;
 
-       unpack_shadow(shadow, &zone, &refault_distance);
+       unpack_shadow(shadow, &zone, &eviction);
+
+       refault = atomic_long_read(&zone->inactive_age);
+
+       /*
+        * The unsigned subtraction here gives an accurate distance
+        * across inactive_age overflows in most cases.
+        *
+        * There is a special case: usually, shadow entries have a
+        * short lifetime and are either refaulted or reclaimed along
+        * with the inode before they get too old.  But it is not
+        * impossible for the inactive_age to lap a shadow entry in
+        * the field, which can then can result in a false small
+        * refault distance, leading to a false activation should this
+        * old entry actually refault again.  However, earlier kernels
+        * used to deactivate unconditionally with *every* reclaim
+        * invocation for the longest time, so the occasional
+        * inappropriate activation leading to pressure on the active
+        * list is not a problem.
+        */
+       refault_distance = (refault - eviction) & EVICTION_MASK;
+
        inc_zone_state(zone, WORKINGSET_REFAULT);
 
        if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) {