mm: memcontrol: switch to the updated jump-label API
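The change named in the subject is mechanical: the old struct static_key / static_key_slow_*() idiom becomes DEFINE_STATIC_KEY_FALSE() plus the static_branch_*() helpers. A minimal sketch of the two idioms side by side (the enable/disable/test wrappers below are illustrative, not functions from the tree):

#include <linux/jump_label.h>

/* New-style key: the default value (false) is part of the definition. */
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);

/*
 * Old idiom, as removed by this diff:
 *	struct static_key memcg_socket_limit_enabled = STATIC_KEY_INIT_FALSE;
 *	static_key_slow_inc(&memcg_socket_limit_enabled);
 *	static_key_slow_dec(&memcg_socket_limit_enabled);
 */

static void sockets_enable(void)		/* illustrative wrapper */
{
        static_branch_inc(&memcg_sockets_enabled_key);
}

static void sockets_disable(void)		/* illustrative wrapper */
{
        static_branch_dec(&memcg_sockets_enabled_key);
}

static bool sockets_enabled(void)		/* illustrative wrapper */
{
        /* Compiles to a patched jump: near-zero cost while disabled. */
        return static_branch_unlikely(&memcg_sockets_enabled_key);
}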
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 2379c1b4efb24505dd9b62316912cdfcdd371001..18bc7f745e9cadd8dde7e53ccc1fe2102fc15f8b 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -8,75 +8,49 @@
 
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
+       struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+       struct page_counter *counter_parent = NULL;
        /*
         * The root cgroup does not use page_counters; it relies
         * on the data already collected by the network
         * subsystem.
         */
-       struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-       struct page_counter *counter_parent = NULL;
-       struct cg_proto *cg_proto, *parent_cg;
-
-       cg_proto = tcp_prot.proto_cgroup(memcg);
-       if (!cg_proto)
+       if (memcg == root_mem_cgroup)
                return 0;
 
-       cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
-       cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
-       cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
-       cg_proto->memory_pressure = 0;
-       cg_proto->memcg = memcg;
+       memcg->tcp_mem.memory_pressure = 0;
 
-       parent_cg = tcp_prot.proto_cgroup(parent);
-       if (parent_cg)
-               counter_parent = &parent_cg->memory_allocated;
+       if (parent)
+               counter_parent = &parent->tcp_mem.memory_allocated;
 
-       page_counter_init(&cg_proto->memory_allocated, counter_parent);
-       percpu_counter_init(&cg_proto->sockets_allocated, 0, GFP_KERNEL);
+       page_counter_init(&memcg->tcp_mem.memory_allocated, counter_parent);
 
        return 0;
 }
-EXPORT_SYMBOL(tcp_init_cgroup);
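With cg_proto gone, the page counter lives directly in struct mem_cgroup and is chained to the parent's counter at init time. A minimal sketch of what that chaining buys, assuming the page_counter API of this kernel generation (the counter variables and charge_sketch() are made up for illustration):

#include <linux/page_counter.h>

static struct page_counter parent_cnt;	/* stand-ins for the memcg fields */
static struct page_counter child_cnt;

static int charge_sketch(void)
{
        struct page_counter *fail;

        page_counter_init(&parent_cnt, NULL);		/* hierarchy root */
        page_counter_init(&child_cnt, &parent_cnt);	/* as in tcp_init_cgroup() */

        /*
         * A charge against the child walks the ->parent chain, so it is
         * accounted and limit-checked at every ancestor; on failure,
         * *fail points at the counter that rejected it.
         */
        if (!page_counter_try_charge(&child_cnt, 1, &fail))
                return -ENOMEM;

        page_counter_uncharge(&child_cnt, 1);		/* also walks up */
        return 0;
}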
 
 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
-       struct cg_proto *cg_proto;
-
-       cg_proto = tcp_prot.proto_cgroup(memcg);
-       if (!cg_proto)
+       if (memcg == root_mem_cgroup)
                return;
 
-       percpu_counter_destroy(&cg_proto->sockets_allocated);
-
-       if (test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
-               static_key_slow_dec(&memcg_socket_limit_enabled);
-
+       if (memcg->tcp_mem.active)
+               static_branch_dec(&memcg_sockets_enabled_key);
 }
-EXPORT_SYMBOL(tcp_destroy_cgroup);
 
 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 {
-       struct cg_proto *cg_proto;
-       int i;
        int ret;
 
-       cg_proto = tcp_prot.proto_cgroup(memcg);
-       if (!cg_proto)
+       if (memcg == root_mem_cgroup)
                return -EINVAL;
 
-       ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages);
+       ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, nr_pages);
        if (ret)
                return ret;
 
-       for (i = 0; i < 3; i++)
-               cg_proto->sysctl_mem[i] = min_t(long, nr_pages,
-                                               sysctl_tcp_mem[i]);
-
-       if (nr_pages == PAGE_COUNTER_MAX)
-               clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
-       else {
+       if (!memcg->tcp_mem.active) {
                /*
-                * The active bit needs to be written after the static_key
+                * The active flag needs to be written after the static_key
                 * update. This is what guarantees that the socket activation
                 * function is the last one to run. See sock_update_memcg() for
                 * details, and note that we don't mark any socket as belonging
@@ -90,14 +64,9 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
                 * We never race with the readers in sock_update_memcg(),
                * because when this value changes, the code to process it is not
                 * patched in yet.
-                *
-                * The activated bit is used to guarantee that no two writers
-                * will do the update in the same memcg. Without that, we can't
-                * properly shutdown the static key.
                 */
-               if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
-                       static_key_slow_inc(&memcg_socket_limit_enabled);
-               set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+               static_branch_inc(&memcg_sockets_enabled_key);
+               memcg->tcp_mem.active = true;
        }
 
        return 0;
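The surviving comment is easier to follow with the reader side in view. A condensed sketch of the sock_update_memcg() logic it references (simplified: in the tree the static-branch test guards the call sites while the ->active test sits in sock_update_memcg() itself, which also handles the root cgroup, cgroup2, and css refcounting):

/*
 * Condensed reader-side sketch; pairs with the writer ordering above.
 * memcg_sockets_enabled_key is declared in the tree; the function name
 * and exact shape here are illustrative.
 */
void sock_update_memcg_sketch(struct sock *sk, struct mem_cgroup *memcg)
{
        /* Not patched in yet: no memcg has ->active set either. */
        if (!static_branch_unlikely(&memcg_sockets_enabled_key))
                return;

        /*
         * static_branch_inc() in tcp_update_limit() happened before
         * ->active was set, so a reader that sees active == true is
         * guaranteed the jump label is already enabled.
         */
        if (memcg->tcp_mem.active)
                sk->sk_memcg = memcg;
}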
@@ -141,32 +110,32 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-       struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg);
        u64 val;
 
        switch (cft->private) {
        case RES_LIMIT:
-               if (!cg_proto)
-                       return PAGE_COUNTER_MAX;
-               val = cg_proto->memory_allocated.limit;
+               if (memcg == root_mem_cgroup)
+                       val = PAGE_COUNTER_MAX;
+               else
+                       val = memcg->tcp_mem.memory_allocated.limit;
                val *= PAGE_SIZE;
                break;
        case RES_USAGE:
-               if (!cg_proto)
+               if (memcg == root_mem_cgroup)
                        val = atomic_long_read(&tcp_memory_allocated);
                else
-                       val = page_counter_read(&cg_proto->memory_allocated);
+                       val = page_counter_read(&memcg->tcp_mem.memory_allocated);
                val *= PAGE_SIZE;
                break;
        case RES_FAILCNT:
-               if (!cg_proto)
+               if (memcg == root_mem_cgroup)
                        return 0;
-               val = cg_proto->memory_allocated.failcnt;
+               val = memcg->tcp_mem.memory_allocated.failcnt;
                break;
        case RES_MAX_USAGE:
-               if (!cg_proto)
+               if (memcg == root_mem_cgroup)
                        return 0;
-               val = cg_proto->memory_allocated.watermark;
+               val = memcg->tcp_mem.memory_allocated.watermark;
                val *= PAGE_SIZE;
                break;
        default:
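One convention to note in the switch above: page_counter stores pages while the cgroup files report bytes, hence the val *= PAGE_SIZE on every branch except RES_FAILCNT, which is an event count rather than an amount of memory. Schematically (a hypothetical helper, not in the tree):

/* Hypothetical helper: page_counter works in pages, the files in bytes. */
static u64 counter_pages_to_bytes(unsigned long nr_pages)
{
        return (u64)nr_pages * PAGE_SIZE;	/* RES_FAILCNT skips this */
}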
@@ -179,19 +148,17 @@ static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
 {
        struct mem_cgroup *memcg;
-       struct cg_proto *cg_proto;
 
        memcg = mem_cgroup_from_css(of_css(of));
-       cg_proto = tcp_prot.proto_cgroup(memcg);
-       if (!cg_proto)
+       if (memcg == root_mem_cgroup)
                return nbytes;
 
        switch (of_cft(of)->private) {
        case RES_MAX_USAGE:
-               page_counter_reset_watermark(&cg_proto->memory_allocated);
+               page_counter_reset_watermark(&memcg->tcp_mem.memory_allocated);
                break;
        case RES_FAILCNT:
-               cg_proto->memory_allocated.failcnt = 0;
+               memcg->tcp_mem.memory_allocated.failcnt = 0;
                break;
        }
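For context, these handlers back the v1 memory controller's memory.kmem.tcp.* files. A userspace sketch of exercising them (the mount point /sys/fs/cgroup/memory and the group name "test" are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_file(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd >= 0) {
                (void)!write(fd, val, strlen(val));
                close(fd);
        }
}

int main(void)
{
        char buf[64];
        ssize_t n;
        int fd;

        /* tcp_cgroup_write() -> tcp_update_limit(): 8 MiB cap, enables the key */
        write_file("/sys/fs/cgroup/memory/test/memory.kmem.tcp.limit_in_bytes",
                   "8388608");

        /* tcp_cgroup_read(), RES_USAGE: reported in bytes */
        fd = open("/sys/fs/cgroup/memory/test/memory.kmem.tcp.usage_in_bytes",
                  O_RDONLY);
        if (fd >= 0) {
                n = read(fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        printf("tcp usage: %s", buf);
                }
                close(fd);
        }

        /* tcp_cgroup_reset(), RES_MAX_USAGE: any write resets the watermark */
        write_file("/sys/fs/cgroup/memory/test/memory.kmem.tcp.max_usage_in_bytes",
                   "0");
        return 0;
}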