git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - net/ipv4/tcp_memcontrol.c
net: tcp_memcontrol: simplify the per-memcg limit access
[karo-tx-linux.git] / net / ipv4 / tcp_memcontrol.c
index 2379c1b4efb24505dd9b62316912cdfcdd371001..ef4268d12e43d04a75ccd75bd472cf89e8d3b16d 100644 (file)
@@ -21,9 +21,6 @@ int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
        if (!cg_proto)
                return 0;
 
-       cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
-       cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
-       cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
        cg_proto->memory_pressure = 0;
        cg_proto->memcg = memcg;
 
@@ -32,7 +29,6 @@ int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
                counter_parent = &parent_cg->memory_allocated;
 
        page_counter_init(&cg_proto->memory_allocated, counter_parent);
-       percpu_counter_init(&cg_proto->sockets_allocated, 0, GFP_KERNEL);
 
        return 0;
 }
@@ -46,9 +42,7 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg)
        if (!cg_proto)
                return;
 
-       percpu_counter_destroy(&cg_proto->sockets_allocated);
-
-       if (test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
+       if (cg_proto->active)
                static_key_slow_dec(&memcg_socket_limit_enabled);
 
 }
@@ -57,7 +51,6 @@ EXPORT_SYMBOL(tcp_destroy_cgroup);
 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 {
        struct cg_proto *cg_proto;
-       int i;
        int ret;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
@@ -68,15 +61,9 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
        if (ret)
                return ret;
 
-       for (i = 0; i < 3; i++)
-               cg_proto->sysctl_mem[i] = min_t(long, nr_pages,
-                                               sysctl_tcp_mem[i]);
-
-       if (nr_pages == PAGE_COUNTER_MAX)
-               clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
-       else {
+       if (!cg_proto->active) {
                /*
-                * The active bit needs to be written after the static_key
+                * The active flag needs to be written after the static_key
                 * update. This is what guarantees that the socket activation
                 * function is the last one to run. See sock_update_memcg() for
                 * details, and note that we don't mark any socket as belonging
@@ -90,14 +77,9 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
                 * We never race with the readers in sock_update_memcg(),
                 * because when this value change, the code to process it is not
                 * patched in yet.
-                *
-                * The activated bit is used to guarantee that no two writers
-                * will do the update in the same memcg. Without that, we can't
-                * properly shutdown the static key.
                 */
-               if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
-                       static_key_slow_inc(&memcg_socket_limit_enabled);
-               set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+               static_key_slow_inc(&memcg_socket_limit_enabled);
+               cg_proto->active = true;
        }
 
        return 0;