Merge branch 'for-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index df87bdd4d692694d8c643d0511a9592f722013b5..0f1d92163f30321caa6aac197d164fc6d46362be 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -49,6 +49,7 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/vmpressure.h>
 #include <linux/mm_inline.h>
 #include <linux/page_cgroup.h>
 #include <linux/cpu.h>
@@ -152,8 +153,13 @@ struct mem_cgroup_stat_cpu {
 };
 
 struct mem_cgroup_reclaim_iter {
-       /* css_id of the last scanned hierarchy member */
-       int position;
+       /*
+        * last scanned hierarchy member. Valid only if last_dead_count
+        * matches memcg->dead_count of the hierarchy root group.
+        */
+       struct mem_cgroup *last_visited;
+       unsigned long last_dead_count;
+
        /* scan generation, increased every round-trip */
        unsigned int generation;
 };
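
The comment in the struct spells out a validation rule rather than a lock: the cached position is only trusted while a snapshot of the root's dead_count still matches. A minimal sketch of that rule, with an illustrative helper name (the real read and write sides are mem_cgroup_iter() and mem_cgroup_invalidate_reclaim_iterators() later in this patch):

static bool position_still_valid(struct mem_cgroup_reclaim_iter *iter,
				 struct mem_cgroup *root)
{
	/* no group in root's hierarchy has died since the position was cached */
	return iter->last_visited &&
	       iter->last_dead_count == (unsigned long)atomic_read(&root->dead_count);
}
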
@@ -256,6 +262,9 @@ struct mem_cgroup {
         */
        struct res_counter res;
 
+       /* vmpressure notifications */
+       struct vmpressure vmpressure;
+
        union {
                /*
                 * the counter to account for mem+swap usage.
@@ -335,6 +344,7 @@ struct mem_cgroup {
        struct mem_cgroup_stat_cpu nocpu_base;
        spinlock_t pcp_counter_lock;
 
+       atomic_t        dead_count;
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
        struct tcp_memcontrol tcp_mem;
 #endif
@@ -353,6 +363,7 @@ struct mem_cgroup {
        atomic_t        numainfo_events;
        atomic_t        numainfo_updating;
 #endif
+
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
@@ -504,6 +515,24 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
        return container_of(s, struct mem_cgroup, css);
 }
 
+/* Some nice accessors for the vmpressure. */
+struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
+{
+       if (!memcg)
+               memcg = root_mem_cgroup;
+       return &memcg->vmpressure;
+}
+
+struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
+{
+       return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
+}
+
+struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css)
+{
+       return &mem_cgroup_from_css(css)->vmpressure;
+}
+
 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 {
        return (memcg == root_mem_cgroup);
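
Since all three accessors are plain container_of()/member lookups, they compose into identities; a small illustrative self-check (not part of the patch), which also covers the NULL-memcg case that falls back to root_mem_cgroup:

static bool vmpressure_accessors_consistent(struct mem_cgroup *memcg)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);

	/* memcg -> vmpressure -> css -> vmpressure lands on the same object */
	return css_to_vmpressure(css) == vmpr;
}
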
@@ -1067,6 +1096,51 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        return memcg;
 }
 
+/*
+ * Returns the next live memcg in a pre-order walk (with an elevated
+ * css reference count), or NULL once the whole subtree under root has
+ * been visited.
+ *
+ * Helper function for mem_cgroup_iter().
+ */
+static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
+               struct mem_cgroup *last_visited)
+{
+       struct cgroup *prev_cgroup, *next_cgroup;
+
+       /*
+        * Root is not visited by cgroup iterators so it needs an
+        * explicit visit.
+        */
+       if (!last_visited)
+               return root;
+
+       prev_cgroup = (last_visited == root) ? NULL
+               : last_visited->css.cgroup;
+skip_node:
+       next_cgroup = cgroup_next_descendant_pre(
+                       prev_cgroup, root->css.cgroup);
+
+       /*
+        * Even if we found a group we have to make sure it is
+        * alive. If css_tryget() fails, the group is on its way out
+        * and should be skipped; continue the tree walk from it.
+        * last_visited css is safe to use because it is
+        * protected by css_get and the tree walk is rcu safe.
+        */
+       if (next_cgroup) {
+               struct mem_cgroup *mem = mem_cgroup_from_cont(
+                               next_cgroup);
+               if (css_tryget(&mem->css))
+                       return mem;
+               else {
+                       prev_cgroup = next_cgroup;
+                       goto skip_node;
+               }
+       }
+
+       return NULL;
+}
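
A hedged sketch of how a caller could drive a complete walk with this helper (the real consumer is mem_cgroup_iter() below, which adds per-zone caching and generation handling on top). The ordering matters: the next position is fetched before the previous one's reference is dropped, and root itself is returned without an extra reference:

static void sketch_walk_subtree(struct mem_cgroup *root)
{
	struct mem_cgroup *pos = NULL, *next;

	rcu_read_lock();
	do {
		next = __mem_cgroup_iter_next(root, pos);
		/* drop the css_tryget() reference pinning the old position */
		if (pos && pos != root)
			css_put(&pos->css);
		pos = next;
		/* ... inspect pos here (must not sleep); NULL ends the walk ... */
	} while (pos);
	rcu_read_unlock();
}
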
+
 /**
  * mem_cgroup_iter - iterate over memory cgroup hierarchy
  * @root: hierarchy root
@@ -1089,7 +1163,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                   struct mem_cgroup_reclaim_cookie *reclaim)
 {
        struct mem_cgroup *memcg = NULL;
-       int id = 0;
+       struct mem_cgroup *last_visited = NULL;
+       unsigned long uninitialized_var(dead_count);
 
        if (mem_cgroup_disabled())
                return NULL;
@@ -1098,20 +1173,17 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                root = root_mem_cgroup;
 
        if (prev && !reclaim)
-               id = css_id(&prev->css);
-
-       if (prev && prev != root)
-               css_put(&prev->css);
+               last_visited = prev;
 
        if (!root->use_hierarchy && root != root_mem_cgroup) {
                if (prev)
-                       return NULL;
+                       goto out_css_put;
                return root;
        }
 
+       rcu_read_lock();
        while (!memcg) {
                struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
-               struct cgroup_subsys_state *css;
 
                if (reclaim) {
                        int nid = zone_to_nid(reclaim->zone);
@@ -1120,31 +1192,60 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 
                        mz = mem_cgroup_zoneinfo(root, nid, zid);
                        iter = &mz->reclaim_iter[reclaim->priority];
-                       if (prev && reclaim->generation != iter->generation)
-                               return NULL;
-                       id = iter->position;
+                       last_visited = iter->last_visited;
+                       if (prev && reclaim->generation != iter->generation) {
+                               iter->last_visited = NULL;
+                               goto out_unlock;
+                       }
+
+                       /*
+                        * If the dead_count mismatches, a destruction
+                        * has happened or is happening concurrently.
+                        * If the dead_count matches, a destruction
+                        * might still happen concurrently, but since
+                        * we checked under RCU, that destruction
+                        * won't free the object until we release the
+                        * RCU reader lock.  Thus, the dead_count
+                        * check verifies the pointer is still valid,
+                        * css_tryget() verifies the cgroup pointed to
+                        * is alive.
+                        */
+                       dead_count = atomic_read(&root->dead_count);
+                       smp_rmb();
+                       last_visited = iter->last_visited;
+                       if (last_visited) {
+                               if ((dead_count != iter->last_dead_count) ||
+                                       !css_tryget(&last_visited->css)) {
+                                       last_visited = NULL;
+                               }
+                       }
                }
 
-               rcu_read_lock();
-               css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
-               if (css) {
-                       if (css == &root->css || css_tryget(css))
-                               memcg = mem_cgroup_from_css(css);
-               } else
-                       id = 0;
-               rcu_read_unlock();
+               memcg = __mem_cgroup_iter_next(root, last_visited);
 
                if (reclaim) {
-                       iter->position = id;
-                       if (!css)
+                       if (last_visited)
+                               css_put(&last_visited->css);
+
+                       iter->last_visited = memcg;
+                       smp_wmb();
+                       iter->last_dead_count = dead_count;
+
+                       if (!memcg)
                                iter->generation++;
                        else if (!prev && memcg)
                                reclaim->generation = iter->generation;
                }
 
-               if (prev && !css)
-                       return NULL;
+               if (prev && !memcg)
+                       goto out_unlock;
        }
+out_unlock:
+       rcu_read_unlock();
+out_css_put:
+       if (prev && prev != root)
+               css_put(&prev->css);
+
        return memcg;
 }
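
The smp_wmb()/smp_rmb() pair is easy to lose in the surrounding control flow, so here is the publication protocol on its own, with illustrative function names (the structures are the ones introduced by this patch):

/* Writer: publish the position first, then the dead_count snapshot. */
static void iter_store(struct mem_cgroup_reclaim_iter *iter,
		       struct mem_cgroup *memcg, unsigned long dead_count)
{
	iter->last_visited = memcg;
	smp_wmb();			/* pairs with smp_rmb() in iter_load() */
	iter->last_dead_count = dead_count;
}

/* Reader: snapshot dead_count first, then read the cached position. */
static struct mem_cgroup *iter_load(struct mem_cgroup_reclaim_iter *iter,
				    struct mem_cgroup *root)
{
	unsigned long dead_count = atomic_read(&root->dead_count);
	struct mem_cgroup *memcg;

	smp_rmb();			/* pairs with smp_wmb() in iter_store() */
	memcg = iter->last_visited;
	if (!memcg || dead_count != iter->last_dead_count)
		return NULL;		/* cache empty or stale: start over */
	if (!css_tryget(&memcg->css))
		return NULL;		/* pointer valid, but the group is dying */
	return memcg;
}

A matching snapshot (checked under RCU) only guarantees the cached pointer is still safe to dereference; css_tryget() must still confirm the group is alive, as the comment in the hunk spells out.
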
 
@@ -1686,11 +1787,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
        struct task_struct *chosen = NULL;
 
        /*
-        * If current has a pending SIGKILL, then automatically select it.  The
-        * goal is to allow it to allocate so that it may quickly exit and free
-        * its memory.
+        * If current has a pending SIGKILL or is exiting, then automatically
+        * select it.  The goal is to allow it to allocate so that it may
+        * quickly exit and free its memory.
         */
-       if (fatal_signal_pending(current)) {
+       if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
                set_thread_flag(TIF_MEMDIE);
                return;
        }
@@ -3012,6 +3113,8 @@ void memcg_update_array_size(int num)
                memcg_limited_groups_array_size = memcg_caches_array_size(num);
 }
 
+static void kmem_cache_destroy_work_func(struct work_struct *w);
+
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
        struct memcg_cache_params *cur_params = s->memcg_params;
@@ -3031,6 +3134,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
                        return -ENOMEM;
                }
 
+               INIT_WORK(&s->memcg_params->destroy,
+                               kmem_cache_destroy_work_func);
                s->memcg_params->is_root_cache = true;
 
                /*
@@ -3078,6 +3183,8 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
        if (!s->memcg_params)
                return -ENOMEM;
 
+       INIT_WORK(&s->memcg_params->destroy,
+                       kmem_cache_destroy_work_func);
        if (memcg) {
                s->memcg_params->memcg = memcg;
                s->memcg_params->root_cache = root_cache;
@@ -3108,12 +3215,12 @@ void memcg_release_cache(struct kmem_cache *s)
 
        root = s->memcg_params->root_cache;
        root->memcg_params->memcg_caches[id] = NULL;
-       mem_cgroup_put(memcg);
 
        mutex_lock(&memcg->slab_caches_mutex);
        list_del(&s->memcg_params->list);
        mutex_unlock(&memcg->slab_caches_mutex);
 
+       mem_cgroup_put(memcg);
 out:
        kfree(s->memcg_params);
 }
@@ -3359,8 +3466,6 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
        list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
                cachep = memcg_params_to_cache(params);
                cachep->memcg_params->dead = true;
-               INIT_WORK(&cachep->memcg_params->destroy,
-                                 kmem_cache_destroy_work_func);
                schedule_work(&cachep->memcg_params->destroy);
        }
        mutex_unlock(&memcg->slab_caches_mutex);
@@ -3379,7 +3484,6 @@ static void memcg_create_cache_work_func(struct work_struct *w)
 
 /*
  * Enqueue the creation of a per-memcg kmem_cache.
- * Called with rcu_read_lock.
  */
 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
                                         struct kmem_cache *cachep)
@@ -3387,12 +3491,8 @@ static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
        struct create_work *cw;
 
        cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
-       if (cw == NULL)
-               return;
-
-       /* The corresponding put will be done in the workqueue. */
-       if (!css_tryget(&memcg->css)) {
-               kfree(cw);
+       if (cw == NULL) {
+               css_put(&memcg->css);
                return;
        }
 
@@ -3448,10 +3548,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 
        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
-       rcu_read_unlock();
 
        if (!memcg_can_account_kmem(memcg))
-               return cachep;
+               goto out;
 
        idx = memcg_cache_id(memcg);
 
@@ -3460,29 +3559,38 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
         * code updating memcg_caches will issue a write barrier to match this.
         */
        read_barrier_depends();
-       if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) {
-               /*
-                * If we are in a safe context (can wait, and not in interrupt
-                * context), we could be be predictable and return right away.
-                * This would guarantee that the allocation being performed
-                * already belongs in the new cache.
-                *
-                * However, there are some clashes that can arrive from locking.
-                * For instance, because we acquire the slab_mutex while doing
-                * kmem_cache_dup, this means no further allocation could happen
-                * with the slab_mutex held.
-                *
-                * Also, because cache creation issue get_online_cpus(), this
-                * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
-                * that ends up reversed during cpu hotplug. (cpuset allocates
-                * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
-                * better to defer everything.
-                */
-               memcg_create_cache_enqueue(memcg, cachep);
-               return cachep;
+       if (likely(cachep->memcg_params->memcg_caches[idx])) {
+               cachep = cachep->memcg_params->memcg_caches[idx];
+               goto out;
        }
 
-       return cachep->memcg_params->memcg_caches[idx];
+       /* The corresponding put will be done in the workqueue. */
+       if (!css_tryget(&memcg->css))
+               goto out;
+       rcu_read_unlock();
+
+       /*
+        * If we are in a safe context (can wait, and not in interrupt
+        * context), we could be predictable and return right away.
+        * This would guarantee that the allocation being performed
+        * already belongs in the new cache.
+        *
+        * However, there are some clashes that can arise from locking.
+        * For instance, because we acquire the slab_mutex while doing
+        * kmem_cache_dup, this means no further allocation could happen
+        * with the slab_mutex held.
+        *
+        * Also, because cache creation issues get_online_cpus(), this
+        * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
+        * that ends up reversed during cpu hotplug. (cpuset allocates
+        * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
+        * better to defer everything.
+        */
+       memcg_create_cache_enqueue(memcg, cachep);
+       return cachep;
+out:
+       rcu_read_unlock();
+       return cachep;
 }
 EXPORT_SYMBOL(__memcg_kmem_get_cache);
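
The restructured lookup above follows a common shape: resolve the memcg under rcu_read_lock(), pin it with css_tryget() before the RCU section ends, and only then call into code that may sleep. A stripped-down sketch of just that shape (illustrative; the fast-path cache lookup of the real function is omitted):

static void sketch_pin_then_defer(struct mem_cgroup *memcg,
				  struct kmem_cache *cachep)
{
	rcu_read_lock();
	if (!css_tryget(&memcg->css)) {		/* group is already being torn down */
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* may sleep; the reference taken above is dropped on the create path */
	memcg_create_cache_enqueue(memcg, cachep);
}
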
 
@@ -4944,9 +5052,6 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
        type = MEMFILE_TYPE(cft->private);
        name = MEMFILE_ATTR(cft->private);
 
-       if (!do_swap_account && type == _MEMSWAP)
-               return -EOPNOTSUPP;
-
        switch (type) {
        case _MEM:
                if (name == RES_USAGE)
@@ -5081,9 +5186,6 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
        type = MEMFILE_TYPE(cft->private);
        name = MEMFILE_ATTR(cft->private);
 
-       if (!do_swap_account && type == _MEMSWAP)
-               return -EOPNOTSUPP;
-
        switch (name) {
        case RES_LIMIT:
                if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
@@ -5160,9 +5262,6 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
        type = MEMFILE_TYPE(event);
        name = MEMFILE_ATTR(event);
 
-       if (!do_swap_account && type == _MEMSWAP)
-               return -EOPNOTSUPP;
-
        switch (name) {
        case RES_MAX_USAGE:
                if (type == _MEM)
@@ -5741,7 +5840,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
                return ret;
 
        return mem_cgroup_sockets_init(memcg, ss);
-};
+}
 
 static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
 {
@@ -5836,6 +5935,11 @@ static struct cftype mem_cgroup_files[] = {
                .unregister_event = mem_cgroup_oom_unregister_event,
                .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
        },
+       {
+               .name = "pressure_level",
+               .register_event = vmpressure_register_event,
+               .unregister_event = vmpressure_unregister_event,
+       },
 #ifdef CONFIG_NUMA
        {
                .name = "numa_stat",
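
For context on what the new file is used for: userspace registers an eventfd against memory.pressure_level through the cgroup v1 event_control interface and then blocks on the eventfd. A hedged userspace example (the cgroup path and the chosen "medium" level are illustrative; "low" and "critical" are the other documented levels):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	uint64_t count;
	char buf[64];
	int efd = eventfd(0, 0);
	int plfd = open("/sys/fs/cgroup/memory/mygroup/memory.pressure_level", O_RDONLY);
	int cfd = open("/sys/fs/cgroup/memory/mygroup/cgroup.event_control", O_WRONLY);

	if (efd < 0 || plfd < 0 || cfd < 0)
		return 1;

	/* registration format: "<eventfd> <fd of memory.pressure_level> <level>" */
	snprintf(buf, sizeof(buf), "%d %d medium", efd, plfd);
	if (write(cfd, buf, strlen(buf)) < 0)
		return 1;

	/* each successful read on the eventfd reports accumulated pressure events */
	while (read(efd, &count, sizeof(count)) == sizeof(count))
		printf("memory pressure event, count=%llu\n", (unsigned long long)count);
	return 0;
}
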
@@ -6117,6 +6221,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
        memcg->move_charge_at_immigrate = 0;
        mutex_init(&memcg->thresholds_lock);
        spin_lock_init(&memcg->move_lock);
+       vmpressure_init(&memcg->vmpressure);
 
        return &memcg->css;
 
@@ -6182,10 +6287,29 @@ mem_cgroup_css_online(struct cgroup *cont)
        return error;
 }
 
+/*
+ * Notify all parents that a group from their hierarchy is gone.
+ */
+static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
+{
+       struct mem_cgroup *parent = memcg;
+
+       while ((parent = parent_mem_cgroup(parent)))
+               atomic_inc(&parent->dead_count);
+
+       /*
+        * If the root memcg is not hierarchical we have to check it
+        * explicitly.
+        */
+       if (!root_mem_cgroup->use_hierarchy)
+               atomic_inc(&root_mem_cgroup->dead_count);
+}
+
 static void mem_cgroup_css_offline(struct cgroup *cont)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
+       mem_cgroup_invalidate_reclaim_iterators(memcg);
        mem_cgroup_reparent_charges(memcg);
        mem_cgroup_destroy_all_caches(memcg);
 }