mm: memcontrol: switch to the updated jump-label API
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fc10620967c79d7b8fbbcc0f82fd9804b1562f13..54eae4f19d803a21321721f267ca9872764629dd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
 EXPORT_SYMBOL(memory_cgrp_subsys);
 
+struct mem_cgroup *root_mem_cgroup __read_mostly;
+
 #define MEM_CGROUP_RECLAIM_RETRIES     5
-static struct mem_cgroup *root_mem_cgroup __read_mostly;
-struct cgroup_subsys_state *mem_cgroup_root_css __read_mostly;
+
+/* Socket memory accounting disabled? */
+static bool cgroup_memory_nosocket;
 
 /* Whether the swap controller is active */
 #ifdef CONFIG_MEMCG_SWAP
@@ -87,6 +90,12 @@ int do_swap_account __read_mostly;
 #define do_swap_account                0
 #endif
 
+/* Whether legacy memory+swap accounting is active */
+static bool do_memsw_account(void)
+{
+       return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
+}
+
 static const char * const mem_cgroup_stat_names[] = {
        "cache",
        "rss",
@@ -288,64 +297,6 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
        return mem_cgroup_from_css(css);
 }
 
-/* Writing them here to avoid exposing memcg's inner layout */
-#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
-
-void sock_update_memcg(struct sock *sk)
-{
-       if (mem_cgroup_sockets_enabled) {
-               struct mem_cgroup *memcg;
-               struct cg_proto *cg_proto;
-
-               BUG_ON(!sk->sk_prot->proto_cgroup);
-
-               /* Socket cloning can throw us here with sk_cgrp already
-                * filled. It won't however, necessarily happen from
-                * process context. So the test for root memcg given
-                * the current task's memcg won't help us in this case.
-                *
-                * Respecting the original socket's memcg is a better
-                * decision in this case.
-                */
-               if (sk->sk_cgrp) {
-                       BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
-                       css_get(&sk->sk_cgrp->memcg->css);
-                       return;
-               }
-
-               rcu_read_lock();
-               memcg = mem_cgroup_from_task(current);
-               cg_proto = sk->sk_prot->proto_cgroup(memcg);
-               if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) &&
-                   css_tryget_online(&memcg->css)) {
-                       sk->sk_cgrp = cg_proto;
-               }
-               rcu_read_unlock();
-       }
-}
-EXPORT_SYMBOL(sock_update_memcg);
-
-void sock_release_memcg(struct sock *sk)
-{
-       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
-               struct mem_cgroup *memcg;
-               WARN_ON(!sk->sk_cgrp->memcg);
-               memcg = sk->sk_cgrp->memcg;
-               css_put(&sk->sk_cgrp->memcg->css);
-       }
-}
-
-struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
-{
-       if (!memcg || mem_cgroup_is_root(memcg))
-               return NULL;
-
-       return &memcg->tcp_mem;
-}
-EXPORT_SYMBOL(tcp_proto_cgroup);
-
-#endif
-
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
@@ -395,7 +346,7 @@ void memcg_put_cache_ids(void)
  * conditional to this static branch, we'll have to allow modules that do
  * kmem_cache_alloc and the like to see this symbol as well
  */
-struct static_key memcg_kmem_enabled_key;
+DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
 
 #endif /* CONFIG_MEMCG_KMEM */
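
For context: the updated jump-label API pairs a typed key definition with the
static_branch_*() accessors, replacing the open-coded struct static_key and the
static_key_slow_inc()/static_key_false() calls this patch removes. A minimal
sketch of the pattern (the key and function names are illustrative, not from
this file):

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(demo_key);	/* was: struct static_key demo_key; */

	static bool demo_enabled(void)
	{
		/* was: static_key_false(&demo_key) */
		return static_branch_unlikely(&demo_key);
	}

	static void demo_enable(void)
	{
		static_branch_inc(&demo_key);	/* was: static_key_slow_inc(&demo_key) */
	}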
@@ -1162,9 +1113,6 @@ bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
        return ret;
 }
 
-#define mem_cgroup_from_counter(counter, member)       \
-       container_of(counter, struct mem_cgroup, member)
-
 /**
  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
  * @memcg: the memory cgroup
@@ -1183,7 +1131,7 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
        if (count < limit)
                margin = limit - count;
 
-       if (do_swap_account) {
+       if (do_memsw_account()) {
                count = page_counter_read(&memcg->memsw);
                limit = READ_ONCE(memcg->memsw.limit);
                if (count <= limit)
@@ -1286,7 +1234,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
                pr_cont(":");
 
                for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-                       if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
+                       if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
                                continue;
                        pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
                                K(mem_cgroup_read_stat(iter, i)));
@@ -1909,7 +1857,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
 
        if (stock->nr_pages) {
                page_counter_uncharge(&old->memory, stock->nr_pages);
-               if (do_swap_account)
+               if (do_memsw_account())
                        page_counter_uncharge(&old->memsw, stock->nr_pages);
                css_put_many(&old->css, stock->nr_pages);
                stock->nr_pages = 0;
@@ -1997,6 +1945,26 @@ static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
+static void reclaim_high(struct mem_cgroup *memcg,
+                        unsigned int nr_pages,
+                        gfp_t gfp_mask)
+{
+       do {
+               if (page_counter_read(&memcg->memory) <= memcg->high)
+                       continue;
+               mem_cgroup_events(memcg, MEMCG_HIGH, 1);
+               try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
+       } while ((memcg = parent_mem_cgroup(memcg)));
+}
+
+static void high_work_func(struct work_struct *work)
+{
+       struct mem_cgroup *memcg;
+
+       memcg = container_of(work, struct mem_cgroup, high_work);
+       reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
+}
+
 /*
  * Scheduled by try_charge() to be executed from the userland return path,
  * this reclaims memory over the high limit.
@@ -2004,20 +1972,13 @@ static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
 void mem_cgroup_handle_over_high(void)
 {
        unsigned int nr_pages = current->memcg_nr_pages_over_high;
-       struct mem_cgroup *memcg, *pos;
+       struct mem_cgroup *memcg;
 
        if (likely(!nr_pages))
                return;
 
-       pos = memcg = get_mem_cgroup_from_mm(current->mm);
-
-       do {
-               if (page_counter_read(&pos->memory) <= pos->high)
-                       continue;
-               mem_cgroup_events(pos, MEMCG_HIGH, 1);
-               try_to_free_mem_cgroup_pages(pos, nr_pages, GFP_KERNEL, true);
-       } while ((pos = parent_mem_cgroup(pos)));
-
+       memcg = get_mem_cgroup_from_mm(current->mm);
+       reclaim_high(memcg, nr_pages, GFP_KERNEL);
        css_put(&memcg->css);
        current->memcg_nr_pages_over_high = 0;
 }
@@ -2039,11 +2000,11 @@ retry:
        if (consume_stock(memcg, nr_pages))
                return 0;
 
-       if (!do_swap_account ||
+       if (!do_memsw_account() ||
            page_counter_try_charge(&memcg->memsw, batch, &counter)) {
                if (page_counter_try_charge(&memcg->memory, batch, &counter))
                        goto done_restock;
-               if (do_swap_account)
+               if (do_memsw_account())
                        page_counter_uncharge(&memcg->memsw, batch);
                mem_over_limit = mem_cgroup_from_counter(counter, memory);
        } else {
@@ -2130,7 +2091,7 @@ force:
         * temporarily by force charging it.
         */
        page_counter_charge(&memcg->memory, nr_pages);
-       if (do_swap_account)
+       if (do_memsw_account())
                page_counter_charge(&memcg->memsw, nr_pages);
        css_get_many(&memcg->css, nr_pages);
 
@@ -2152,6 +2113,11 @@ done_restock:
         */
        do {
                if (page_counter_read(&memcg->memory) > memcg->high) {
+                       /* Don't bother a random interrupted task */
+                       if (in_interrupt()) {
+                               schedule_work(&memcg->high_work);
+                               break;
+                       }
                        current->memcg_nr_pages_over_high += batch;
                        set_notify_resume(current);
                        break;
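
Note how the two over-high paths introduced here converge on the same reclaim
loop; schematically (a sketch of the control flow, not literal code):

	if (in_interrupt())
		schedule_work(&memcg->high_work);	/* high_work_func() -> reclaim_high() */
	else
		set_notify_resume(current);	/* mem_cgroup_handle_over_high() -> reclaim_high() */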
@@ -2167,7 +2133,7 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
                return;
 
        page_counter_uncharge(&memcg->memory, nr_pages);
-       if (do_swap_account)
+       if (do_memsw_account())
                page_counter_uncharge(&memcg->memsw, nr_pages);
 
        css_put_many(&memcg->css, nr_pages);
@@ -2356,7 +2322,7 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
  * Can't be called in interrupt context or from kernel threads.
  * This function needs to be called with rcu_read_lock() held.
  */
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
        struct mem_cgroup *memcg;
        struct kmem_cache *memcg_cachep;
@@ -2364,6 +2330,12 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 
        VM_BUG_ON(!is_root_cache(cachep));
 
+       if (cachep->flags & SLAB_ACCOUNT)
+               gfp |= __GFP_ACCOUNT;
+
+       if (!(gfp & __GFP_ACCOUNT))
+               return cachep;
+
        if (current->memcg_kmem_skip_account)
                return cachep;
 
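With this change, kmem accounting becomes opt-in per call site: an allocation
is charged only if its cache was created with SLAB_ACCOUNT or the caller passes
__GFP_ACCOUNT explicitly. A hedged usage sketch (the cache name and object size
are made up):

	/* Objects from this cache are charged to the allocating memcg. */
	struct kmem_cache *demo_cache = kmem_cache_create("demo_cache",
			64, 0, SLAB_ACCOUNT, NULL);
	void *obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);

	/* Or charge a single allocation explicitly: */
	void *buf = kmalloc(4096, GFP_KERNEL | __GFP_ACCOUNT);
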
@@ -2447,7 +2419,7 @@ void __memcg_kmem_uncharge(struct page *page, int order)
 
        page_counter_uncharge(&memcg->kmem, nr_pages);
        page_counter_uncharge(&memcg->memory, nr_pages);
-       if (do_swap_account)
+       if (do_memsw_account())
                page_counter_uncharge(&memcg->memsw, nr_pages);
 
        page->mem_cgroup = NULL;
@@ -2935,7 +2907,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
        err = page_counter_limit(&memcg->kmem, nr_pages);
        VM_BUG_ON(err);
 
-       static_key_slow_inc(&memcg_kmem_enabled_key);
+       static_branch_inc(&memcg_kmem_enabled_key);
        /*
         * A memory cgroup is considered kmem-active as soon as it gets
         * kmemcg_id. Setting the id after enabling static branching will
@@ -3162,7 +3134,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
        BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-               if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
+               if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
                        continue;
                seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
                           mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
@@ -3184,14 +3156,14 @@ static int memcg_stat_show(struct seq_file *m, void *v)
        }
        seq_printf(m, "hierarchical_memory_limit %llu\n",
                   (u64)memory * PAGE_SIZE);
-       if (do_swap_account)
+       if (do_memsw_account())
                seq_printf(m, "hierarchical_memsw_limit %llu\n",
                           (u64)memsw * PAGE_SIZE);
 
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                unsigned long long val = 0;
 
-               if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
+               if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
                        continue;
                for_each_mem_cgroup_tree(mi, memcg)
                        val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
@@ -3322,7 +3294,7 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
 {
        while (memcg) {
                __mem_cgroup_threshold(memcg, false);
-               if (do_swap_account)
+               if (do_memsw_account())
                        __mem_cgroup_threshold(memcg, true);
 
                memcg = parent_mem_cgroup(memcg);
@@ -3621,7 +3593,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
        if (ret)
                return ret;
 
-       return mem_cgroup_sockets_init(memcg, ss);
+       return tcp_init_cgroup(memcg, ss);
 }
 
 static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
@@ -3674,10 +3646,10 @@ static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
        if (memcg->kmem_acct_activated) {
                memcg_destroy_kmem_caches(memcg);
-               static_key_slow_dec(&memcg_kmem_enabled_key);
+               static_branch_dec(&memcg_kmem_enabled_key);
                WARN_ON(page_counter_read(&memcg->kmem));
        }
-       mem_cgroup_sockets_destroy(memcg);
+       tcp_destroy_cgroup(memcg);
 }
 #else
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
@@ -4196,6 +4168,8 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
        int node;
 
+       cancel_work_sync(&memcg->high_work);
+
        mem_cgroup_remove_from_trees(memcg);
 
        for_each_node(node)
@@ -4206,17 +4180,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
        kfree(memcg);
 }
 
-/*
- * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
- */
-struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
-{
-       if (!memcg->memory.parent)
-               return NULL;
-       return mem_cgroup_from_counter(memcg->memory.parent, memory);
-}
-EXPORT_SYMBOL(parent_mem_cgroup);
-
 static struct cgroup_subsys_state * __ref
 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -4235,7 +4198,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        /* root ? */
        if (parent_css == NULL) {
                root_mem_cgroup = memcg;
-               mem_cgroup_root_css = &memcg->css;
                page_counter_init(&memcg->memory, NULL);
                memcg->high = PAGE_COUNTER_MAX;
                memcg->soft_limit = PAGE_COUNTER_MAX;
@@ -4243,6 +4205,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
                page_counter_init(&memcg->kmem, NULL);
        }
 
+       INIT_WORK(&memcg->high_work, high_work_func);
        memcg->last_scanned_node = MAX_NUMNODES;
        INIT_LIST_HEAD(&memcg->oom_notify);
        memcg->move_charge_at_immigrate = 0;
@@ -4256,6 +4219,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 #endif
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&memcg->cgwb_list);
+#endif
+#ifdef CONFIG_INET
+       memcg->socket_pressure = jiffies;
 #endif
        return &memcg->css;
 
@@ -4314,6 +4280,11 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
        if (ret)
                return ret;
 
+#ifdef CONFIG_INET
+       if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
+               static_branch_inc(&memcg_sockets_enabled_key);
+#endif
+
        /*
         * Make sure the memcg is initialized: mem_cgroup_iter()
         * orders reading memcg->initialized against its callers
@@ -4360,6 +4331,10 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
        memcg_destroy_kmem(memcg);
+#ifdef CONFIG_INET
+       if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
+               static_branch_dec(&memcg_sockets_enabled_key);
+#endif
        __mem_cgroup_free(memcg);
 }
 
@@ -4476,7 +4451,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
         * we call find_get_page() with swapper_space directly.
         */
        page = find_get_page(swap_address_space(ent), ent.val);
-       if (do_swap_account)
+       if (do_memsw_account())
                entry->val = ent.val;
 
        return page;
@@ -4511,7 +4486,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
                page = find_get_entry(mapping, pgoff);
                if (radix_tree_exceptional_entry(page)) {
                        swp_entry_t swp = radix_to_swp_entry(page);
-                       if (do_swap_account)
+                       if (do_memsw_account())
                                *entry = swp;
                        page = find_get_page(swap_address_space(swp), swp.val);
                }
@@ -4813,7 +4788,7 @@ static void mem_cgroup_clear_mc(void)
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 {
        struct cgroup_subsys_state *css;
-       struct mem_cgroup *memcg;
+       struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
        struct mem_cgroup *from;
        struct task_struct *leader, *p;
        struct mm_struct *mm;
@@ -5304,7 +5279,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                if (page->mem_cgroup)
                        goto out;
 
-               if (do_swap_account) {
+               if (do_memsw_account()) {
                        swp_entry_t ent = { .val = page_private(page), };
                        unsigned short id = lookup_swap_cgroup_id(ent);
 
@@ -5378,7 +5353,7 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
        memcg_check_events(memcg, page);
        local_irq_enable();
 
-       if (do_swap_account && PageSwapCache(page)) {
+       if (do_memsw_account() && PageSwapCache(page)) {
                swp_entry_t entry = { .val = page_private(page) };
                /*
                 * The swap entry might not get freed for a long time,
@@ -5427,7 +5402,7 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 
        if (!mem_cgroup_is_root(memcg)) {
                page_counter_uncharge(&memcg->memory, nr_pages);
-               if (do_swap_account)
+               if (do_memsw_account())
                        page_counter_uncharge(&memcg->memsw, nr_pages);
                memcg_oom_recover(memcg);
        }
@@ -5580,6 +5555,121 @@ void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
        commit_charge(newpage, memcg, true);
 }
 
+#ifdef CONFIG_INET
+
+DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
+EXPORT_SYMBOL(memcg_sockets_enabled_key);
+
+void sock_update_memcg(struct sock *sk)
+{
+       struct mem_cgroup *memcg;
+
+       /* Socket cloning can throw us here with sk_memcg already
+        * filled. It won't, however, necessarily happen from
+        * process context. So the test for root memcg given
+        * the current task's memcg won't help us in this case.
+        *
+        * Respecting the original socket's memcg is a better
+        * decision in this case.
+        */
+       if (sk->sk_memcg) {
+               BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
+               css_get(&sk->sk_memcg->css);
+               return;
+       }
+
+       rcu_read_lock();
+       memcg = mem_cgroup_from_task(current);
+       if (memcg == root_mem_cgroup)
+               goto out;
+#ifdef CONFIG_MEMCG_KMEM
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcp_mem.active)
+               goto out;
+#endif
+       if (css_tryget_online(&memcg->css))
+               sk->sk_memcg = memcg;
+out:
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(sock_update_memcg);
+
+void sock_release_memcg(struct sock *sk)
+{
+       WARN_ON(!sk->sk_memcg);
+       css_put(&sk->sk_memcg->css);
+}
+
+/**
+ * mem_cgroup_charge_skmem - charge socket memory
+ * @memcg: memcg to charge
+ * @nr_pages: number of pages to charge
+ *
+ * Charges @nr_pages to @memcg. Returns %true if the charge fit within
+ * @memcg's configured limit, %false if the charge had to be forced.
+ */
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
+{
+       gfp_t gfp_mask = GFP_KERNEL;
+
+#ifdef CONFIG_MEMCG_KMEM
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
+               struct page_counter *counter;
+
+               if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
+                                           nr_pages, &counter)) {
+                       memcg->tcp_mem.memory_pressure = 0;
+                       return true;
+               }
+               page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
+               memcg->tcp_mem.memory_pressure = 1;
+               return false;
+       }
+#endif
+       /* Don't block in the packet receive path */
+       if (in_softirq())
+               gfp_mask = GFP_NOWAIT;
+
+       if (try_charge(memcg, gfp_mask, nr_pages) == 0)
+               return true;
+
+       try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
+       return false;
+}
+
+/**
+ * mem_cgroup_uncharge_skmem - uncharge socket memory
+ * @memcg: memcg to uncharge
+ * @nr_pages: number of pages to uncharge
+ */
+void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
+{
+#ifdef CONFIG_MEMCG_KMEM
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
+               page_counter_uncharge(&memcg->tcp_mem.memory_allocated,
+                                     nr_pages);
+               return;
+       }
+#endif
+       page_counter_uncharge(&memcg->memory, nr_pages);
+       css_put_many(&memcg->css, nr_pages);
+}
+
+#endif /* CONFIG_INET */
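
On the consumer side, callers in the networking core are expected to gate these
helpers behind the mem_cgroup_sockets_enabled static branch and a non-NULL
sk->sk_memcg; roughly (a sketch under those assumptions, not code from this
patch):

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		goto suppress_allocation;	/* charge was forced: apply backpressure */

	/* ... and on teardown: */
	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);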
+
+static int __init cgroup_memory(char *s)
+{
+       char *token;
+
+       while ((token = strsep(&s, ",")) != NULL) {
+               if (!*token)
+                       continue;
+               if (!strcmp(token, "nosocket"))
+                       cgroup_memory_nosocket = true;
+       }
+       return 0;
+}
+__setup("cgroup.memory=", cgroup_memory);
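
Booting with the new command-line parameter disables socket memory accounting
even on the unified hierarchy, e.g.:

	cgroup.memory=nosocket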
+
 /*
  * subsys_initcall() for memory controller.
  *
@@ -5635,7 +5725,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        VM_BUG_ON_PAGE(PageLRU(page), page);
        VM_BUG_ON_PAGE(page_count(page), page);
 
-       if (!do_swap_account)
+       if (!do_memsw_account())
                return;
 
        memcg = page->mem_cgroup;
@@ -5675,7 +5765,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
        struct mem_cgroup *memcg;
        unsigned short id;
 
-       if (!do_swap_account)
+       if (!do_memsw_account())
                return;
 
        id = swap_cgroup_record(entry, 0);