memcg/sl[au]b: shrink dead caches
author     Glauber Costa <glommer@parallels.com>
           Fri, 9 Nov 2012 03:04:17 +0000 (14:04 +1100)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Wed, 14 Nov 2012 04:54:56 +0000 (15:54 +1100)
In the sl[au]b allocators, when the last object on a page is freed, the page
is not necessarily released back to the system right away.

This means that when we destroy a memcg cache that happens to be empty, it
may take a long time to actually go away: removing the memcg reference will
not destroy it, because there are still pending references, and the empty
pages will stay around until a shrinker is invoked for some other reason.

In this patch, we call kmem_cache_shrink() on every dead cache that cannot
be destroyed because of remaining pages.  After shrinking, the cache may
become empty and can then be freed.  If it is still not empty, we schedule a
lazy worker to keep trying.
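
Condensed into a single worker body, the logic this adds looks roughly like
the sketch below.  This is only an illustration of the shrink-then-reschedule
flow (the function name is made up for the sketch); the real code, including
the comments explaining the re-queueing subtleties, is in the mm/memcontrol.c
hunk further down.

static void kmem_cache_destroy_work_sketch(struct work_struct *w)
{
	struct memcg_cache_params *p =
		container_of(to_delayed_work(w), struct memcg_cache_params, destroy);
	struct kmem_cache *cachep = memcg_params_to_cache(p);

	if (atomic_read(&p->nr_pages) != 0) {
		kmem_cache_shrink(cachep);	/* try to release empty slab pages */
		if (atomic_read(&p->nr_pages) != 0)
			/* still populated: retry roughly once per minute */
			schedule_delayed_work(&p->destroy, 60 * HZ);
		/* if shrinking emptied the cache, memcg_release_pages() re-queues us */
		return;
	}
	kmem_cache_destroy(cachep);		/* nothing left, free the cache */
}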

Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/slab.h
mm/memcontrol.c

diff --git a/include/linux/slab.h b/include/linux/slab.h
index b63152938123e9edf5d860642a98f253fd2762d0..e09aa3d94a7e2cf244f707ecf167d4626304da91 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -213,7 +213,7 @@ struct memcg_cache_params {
                        struct kmem_cache *root_cache;
                        bool dead;
                        atomic_t nr_pages;
-                       struct work_struct destroy;
+                       struct delayed_work destroy;
                };
        };
 };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 388a21d31eac2851c9aff85cd243046c9a8300a8..b7c4e6be287188d6ce1c33caaac09d0f9302c6af 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3056,12 +3056,35 @@ static void kmem_cache_destroy_work_func(struct work_struct *w)
 {
        struct kmem_cache *cachep;
        struct memcg_cache_params *p;
+       struct delayed_work *dw = to_delayed_work(w);
 
-       p = container_of(w, struct memcg_cache_params, destroy);
+       p = container_of(dw, struct memcg_cache_params, destroy);
 
        cachep = memcg_params_to_cache(p);
 
-       if (!atomic_read(&cachep->memcg_params->nr_pages))
+       /*
+        * If we get down to 0 after shrink, we could delete right away.
+        * However, memcg_release_pages() already puts us back in the workqueue
+        * in that case. If we proceed deleting, we'll get a dangling
+        * reference, and removing the object from the workqueue in that case
+        * is unnecessary complication. We are not a fast path.
+        *
+        * Note that this case is fundamentally different from racing with
+        * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
+        * kmem_cache_shrink, not only we would be reinserting a dead cache
+        * into the queue, but doing so from inside the worker racing to
+        * destroy it.
+        *
+        * So if we aren't down to zero, we'll just schedule a worker and try
+        * again
+        */
+       if (atomic_read(&cachep->memcg_params->nr_pages) != 0) {
+               kmem_cache_shrink(cachep);
+               if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
+                       return;
+               /* Once per minute should be good enough. */
+               schedule_delayed_work(&cachep->memcg_params->destroy, 60 * HZ);
+       } else
                kmem_cache_destroy(cachep);
 }
 
@@ -3070,11 +3093,31 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
        if (!cachep->memcg_params->dead)
                return;
 
+       /*
+        * There are many ways in which we can get here.
+        *
+        * We can get to a memory-pressure situation while the delayed work is
+        * still pending to run. The vmscan shrinkers can then release all
+        * cache memory and get us to destruction. If this is the case, we'll
+        * be executed twice, which is a bug (the second time will execute over
+        * bogus data). In this case, cancelling the work should be fine.
+        *
+        * But we can also get here from the worker itself, if
+        * kmem_cache_shrink is enough to shake all the remaining objects and
+        * get the page count to 0. In this case, we'll deadlock if we try to
+        * cancel the work (the worker runs with an internal lock held, which
+        * is the same lock we would hold for cancel_delayed_work_sync().)
+        *
+        * Since we can't possibly know who got us here, just refrain from
+        * running if there is already work pending
+        */
+       if (delayed_work_pending(&cachep->memcg_params->destroy))
+               return;
        /*
         * We have to defer the actual destroying to a workqueue, because
         * we might currently be in a context that cannot sleep.
         */
-       schedule_work(&cachep->memcg_params->destroy);
+       schedule_delayed_work(&cachep->memcg_params->destroy, 0);
 }
 
 static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
@@ -3226,9 +3269,9 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
        list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
                cachep = memcg_params_to_cache(params);
                cachep->memcg_params->dead = true;
-               INIT_WORK(&cachep->memcg_params->destroy,
-                         kmem_cache_destroy_work_func);
-               schedule_work(&cachep->memcg_params->destroy);
+               INIT_DELAYED_WORK(&cachep->memcg_params->destroy,
+                                 kmem_cache_destroy_work_func);
+               schedule_delayed_work(&cachep->memcg_params->destroy, 0);
        }
        mutex_unlock(&memcg->slab_caches_mutex);
 }
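
For readers less familiar with the workqueue API, the scheduling pattern used
above can be reduced to the following self-contained, module-style sketch: a
delayed work item that re-arms itself while there is still work to do, and a
trigger path that refuses to queue it again while it is already pending.
Everything here (reap_work, pending_pages, trigger_reap) is illustrative and
not part of the patch.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

static atomic_t pending_pages = ATOMIC_INIT(3);	/* stand-in for nr_pages */
static struct delayed_work reap_work;

static void reap_work_func(struct work_struct *w)
{
	/* Shrink-and-retry: re-arm until nothing is left to reap. */
	if (atomic_dec_return(&pending_pages) > 0)
		schedule_delayed_work(to_delayed_work(w), 60 * HZ);
}

static void trigger_reap(void)
{
	/*
	 * Like mem_cgroup_destroy_cache() above: if the work is already
	 * pending, do not queue it again (and do not try to cancel it -
	 * doing so from the worker itself would deadlock).
	 */
	if (delayed_work_pending(&reap_work))
		return;
	schedule_delayed_work(&reap_work, 0);
}

static int __init reap_example_init(void)
{
	INIT_DELAYED_WORK(&reap_work, reap_work_func);
	trigger_reap();
	return 0;
}

static void __exit reap_example_exit(void)
{
	cancel_delayed_work_sync(&reap_work);
}

module_init(reap_example_init);
module_exit(reap_example_exit);
MODULE_LICENSE("GPL");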