git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
mm: memcontrol: flatten struct cg_proto
author Johannes Weiner <hannes@cmpxchg.org>
Wed, 20 Jan 2016 23:02:50 +0000 (15:02 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 21 Jan 2016 01:09:18 +0000 (17:09 -0800)
There are no more external users of struct cg_proto, so flatten the
structure into struct mem_cgroup.

Since using those struct members doesn't stand out as much anymore,
add cgroup2 static branches to make it clearer which code is legacy.

Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
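
For orientation, a hedged sketch of the shape this patch leaves behind, condensed from the hunks below; the struct fields and the cgroup_subsys_on_dfl() guard come straight from the diff, while the surrounding members and the cgroup2 path are elided:

struct mem_cgroup {
	/* ... */
	struct page_counter	memory;

	/* Legacy consumer-oriented counters */
	struct page_counter	memsw;
	struct page_counter	kmem;
	struct page_counter	tcpmem;		/* was cg_proto::memory_allocated */
	/* ... */

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;	/* was cg_proto::active */
	int			tcpmem_pressure;	/* was cg_proto::memory_pressure */
	/* ... */
};

static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	/* cgroup_subsys_on_dfl() marks this branch as cgroup1 (legacy) only */
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	/* ... cgroup2 socket_pressure check unchanged ... */
	return false;
}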
include/linux/memcontrol.h
mm/memcontrol.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 47995b49942935056d0a08269aa150679ad5b04d..a3869bf97746917fd439ce0e28a4893c8f12abc1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -85,12 +85,6 @@ enum mem_cgroup_events_target {
        MEM_CGROUP_NTARGETS,
 };
 
-struct cg_proto {
-       struct page_counter     memory_allocated;       /* Current allocated memory. */
-       int                     memory_pressure;
-       bool                    active;
-};
-
 #ifdef CONFIG_MEMCG
 struct mem_cgroup_stat_cpu {
        long count[MEM_CGROUP_STAT_NSTATS];
@@ -169,8 +163,11 @@ struct mem_cgroup {
 
        /* Accounted resources */
        struct page_counter memory;
+
+       /* Legacy consumer-oriented counters */
        struct page_counter memsw;
        struct page_counter kmem;
+       struct page_counter tcpmem;
 
        /* Normal memory consumption range */
        unsigned long low;
@@ -236,7 +233,8 @@ struct mem_cgroup {
        unsigned long           socket_pressure;
 
        /* Legacy tcp memory accounting */
-       struct cg_proto tcp_mem;
+       bool                    tcpmem_active;
+       int                     tcpmem_pressure;
 
 #ifndef CONFIG_SLOB
         /* Index in the kmem_cache->memcg_params.memcg_caches array */
@@ -715,7 +713,7 @@ extern struct static_key_false memcg_sockets_enabled_key;
 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
-       if (memcg->tcp_mem.memory_pressure)
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
                return true;
        do {
                if (time_before(jiffies, memcg->socket_pressure))
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 379f9911b87b7c903b99570275dda46e1b25cdde..6937f16f5ecb3417d9e544727ba430c33ce3b8c1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2843,7 +2843,7 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
                counter = &memcg->kmem;
                break;
        case _TCP:
-               counter = &memcg->tcp_mem.memory_allocated;
+               counter = &memcg->tcpmem;
                break;
        default:
                BUG();
@@ -3028,11 +3028,11 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
 
        mutex_lock(&memcg_limit_mutex);
 
-       ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, limit);
+       ret = page_counter_limit(&memcg->tcpmem, limit);
        if (ret)
                goto out;
 
-       if (!memcg->tcp_mem.active) {
+       if (!memcg->tcpmem_active) {
                /*
                 * The active flag needs to be written after the static_key
                 * update. This is what guarantees that the socket activation
@@ -3050,7 +3050,7 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
                 * patched in yet.
                 */
                static_branch_inc(&memcg_sockets_enabled_key);
-               memcg->tcp_mem.active = true;
+               memcg->tcpmem_active = true;
        }
 out:
        mutex_unlock(&memcg_limit_mutex);
@@ -3119,7 +3119,7 @@ static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
                counter = &memcg->kmem;
                break;
        case _TCP:
-               counter = &memcg->tcp_mem.memory_allocated;
+               counter = &memcg->tcpmem;
                break;
        default:
                BUG();
@@ -4295,8 +4295,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
                memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, &parent->memsw);
                page_counter_init(&memcg->kmem, &parent->kmem);
-               page_counter_init(&memcg->tcp_mem.memory_allocated,
-                                 &parent->tcp_mem.memory_allocated);
+               page_counter_init(&memcg->tcpmem, &parent->tcpmem);
 
                /*
                 * No need to take a reference to the parent because cgroup
@@ -4308,7 +4307,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
                memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
-               page_counter_init(&memcg->tcp_mem.memory_allocated, NULL);
+               page_counter_init(&memcg->tcpmem, NULL);
                /*
                 * Deeper hierachy with use_hierarchy == false doesn't make
                 * much sense so let cgroup subsystem know about this
@@ -4374,7 +4373,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
                static_branch_dec(&memcg_sockets_enabled_key);
 
-       if (memcg->tcp_mem.active)
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
                static_branch_dec(&memcg_sockets_enabled_key);
 
        memcg_free_kmem(memcg);
@@ -5601,7 +5600,7 @@ void sock_update_memcg(struct sock *sk)
        memcg = mem_cgroup_from_task(current);
        if (memcg == root_mem_cgroup)
                goto out;
-       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcp_mem.active)
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
                goto out;
        if (css_tryget_online(&memcg->css))
                sk->sk_memcg = memcg;
@@ -5629,15 +5628,14 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
        gfp_t gfp_mask = GFP_KERNEL;
 
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
-               struct page_counter *counter;
+               struct page_counter *fail;
 
-               if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
-                                           nr_pages, &counter)) {
-                       memcg->tcp_mem.memory_pressure = 0;
+               if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
+                       memcg->tcpmem_pressure = 0;
                        return true;
                }
-               page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
-               memcg->tcp_mem.memory_pressure = 1;
+               page_counter_charge(&memcg->tcpmem, nr_pages);
+               memcg->tcpmem_pressure = 1;
                return false;
        }
 
@@ -5660,8 +5658,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
-               page_counter_uncharge(&memcg->tcp_mem.memory_allocated,
-                                     nr_pages);
+               page_counter_uncharge(&memcg->tcpmem, nr_pages);
                return;
        }
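
The memcg_sockets_enabled_key bookkeeping above (static_branch_inc() in memcg_update_tcp_limit(), a matching static_branch_dec() in mem_cgroup_css_free()) follows the usual reference-counted jump-label pattern. A minimal standalone sketch of that pattern, with illustrative names and assuming only the stock <linux/jump_label.h> API:

#include <linux/jump_label.h>

/* illustrative key; the patch uses memcg_sockets_enabled_key */
static DEFINE_STATIC_KEY_FALSE(example_enabled_key);

static void example_enable(void)
{
	/* patches the branch sites; nests, so each user adds one reference */
	static_branch_inc(&example_enabled_key);
}

static void example_disable(void)
{
	/* must balance every static_branch_inc() */
	static_branch_dec(&example_enabled_key);
}

static bool example_fast_path(void)
{
	/* compiles to a straight-line no-op until the key goes positive */
	if (static_branch_unlikely(&example_enabled_key))
		return true;
	return false;
}

The ordering called out in the comment in memcg_update_tcp_limit() still applies: tcpmem_active is written only after static_branch_inc(), so a task that observes the flag set is guaranteed to run with the branch already patched in.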