git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - mm/memcontrol.c
mm: memcontrol: re-use global VM event enum
[karo-tx-linux.git] / mm / memcontrol.c
index 2bd7541d7c11231431c060ca6cfe84a89f096fe3..6b42887e5f1459a098c8acdc48ec5d020f09dde0 100644 (file)
@@ -104,19 +104,13 @@ static const char * const mem_cgroup_stat_names[] = {
        "cache",
        "rss",
        "rss_huge",
+       "shmem",
        "mapped_file",
        "dirty",
        "writeback",
        "swap",
 };
 
-static const char * const mem_cgroup_events_names[] = {
-       "pgpgin",
-       "pgpgout",
-       "pgfault",
-       "pgmajfault",
-};
-
 static const char * const mem_cgroup_lru_names[] = {
        "inactive_anon",
        "active_anon",
@@ -568,32 +562,15 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
  * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static unsigned long
-mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
-{
-       long val = 0;
-       int cpu;
-
-       /* Per-cpu values can be negative, use a signed accumulator */
-       for_each_possible_cpu(cpu)
-               val += per_cpu(memcg->stat->count[idx], cpu);
-       /*
-        * Summing races with updates, so val may be negative.  Avoid exposing
-        * transient negative values.
-        */
-       if (val < 0)
-               val = 0;
-       return val;
-}
 
 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
-                                           enum mem_cgroup_events_index idx)
+                                           enum memcg_event_item event)
 {
        unsigned long val = 0;
        int cpu;
 
        for_each_possible_cpu(cpu)
-               val += per_cpu(memcg->stat->events[idx], cpu);
+               val += per_cpu(memcg->stat->events[event], cpu);
        return val;
 }
 
@@ -608,9 +585,13 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
        if (PageAnon(page))
                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
                                nr_pages);
-       else
+       else {
                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
                                nr_pages);
+               if (PageSwapBacked(page))
+                       __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SHMEM],
+                                      nr_pages);
+       }
 
        if (compound) {
                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
@@ -620,9 +601,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 
        /* pagein of a big page is an event. So, ignore page size */
        if (nr_pages > 0)
-               __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
+               __this_cpu_inc(memcg->stat->events[PGPGIN]);
        else {
-               __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
+               __this_cpu_inc(memcg->stat->events[PGPGOUT]);
                nr_pages = -nr_pages; /* for event */
        }
 
@@ -1837,7 +1818,7 @@ static void reclaim_high(struct mem_cgroup *memcg,
        do {
                if (page_counter_read(&memcg->memory) <= memcg->high)
                        continue;
-               mem_cgroup_events(memcg, MEMCG_HIGH, 1);
+               mem_cgroup_event(memcg, MEMCG_HIGH);
                try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
        } while ((memcg = parent_mem_cgroup(memcg)));
 }
@@ -1928,7 +1909,7 @@ retry:
        if (!gfpflags_allow_blocking(gfp_mask))
                goto nomem;
 
-       mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
+       mem_cgroup_event(mem_over_limit, MEMCG_MAX);
 
        nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
                                                    gfp_mask, may_swap);
@@ -1971,7 +1952,7 @@ retry:
        if (fatal_signal_pending(current))
                goto force;
 
-       mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
+       mem_cgroup_event(mem_over_limit, MEMCG_OOM);
 
        mem_cgroup_oom(mem_over_limit, gfp_mask,
                       get_order(nr_pages * PAGE_SIZE));
@@ -3131,6 +3112,21 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
 }
 #endif /* CONFIG_NUMA */
 
+/* Universal VM events cgroup1 shows, original sort order */
+unsigned int memcg1_events[] = {
+       PGPGIN,
+       PGPGOUT,
+       PGFAULT,
+       PGMAJFAULT,
+};
+
+static const char *const memcg1_event_names[] = {
+       "pgpgin",
+       "pgpgout",
+       "pgfault",
+       "pgmajfault",
+};
+
 static int memcg_stat_show(struct seq_file *m, void *v)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
@@ -3140,8 +3136,6 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 
        BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
                     MEM_CGROUP_STAT_NSTATS);
-       BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
-                    MEM_CGROUP_EVENTS_NSTATS);
        BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
@@ -3151,9 +3145,9 @@ static int memcg_stat_show(struct seq_file *m, void *v)
                           mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
        }
 
-       for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
-               seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
-                          mem_cgroup_read_events(memcg, i));
+       for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
+               seq_printf(m, "%s %lu\n", memcg1_event_names[i],
+                          mem_cgroup_read_events(memcg, memcg1_events[i]));
 
        for (i = 0; i < NR_LRU_LISTS; i++)
                seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
@@ -3181,13 +3175,12 @@ static int memcg_stat_show(struct seq_file *m, void *v)
                seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
        }
 
-       for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+       for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
                unsigned long long val = 0;
 
                for_each_mem_cgroup_tree(mi, memcg)
-                       val += mem_cgroup_read_events(mi, i);
-               seq_printf(m, "total_%s %llu\n",
-                          mem_cgroup_events_names[i], val);
+                       val += mem_cgroup_read_events(mi, memcg1_events[i]);
+               seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
        }
 
        for (i = 0; i < NR_LRU_LISTS; i++) {
@@ -5154,7 +5147,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
                        continue;
                }
 
-               mem_cgroup_events(memcg, MEMCG_OOM, 1);
+               mem_cgroup_event(memcg, MEMCG_OOM);
                if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
                        break;
        }
@@ -5208,6 +5201,8 @@ static int memory_stat_show(struct seq_file *m, void *v)
        seq_printf(m, "sock %llu\n",
                   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
 
+       seq_printf(m, "shmem %llu\n",
+                  (u64)stat[MEM_CGROUP_STAT_SHMEM] * PAGE_SIZE);
        seq_printf(m, "file_mapped %llu\n",
                   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
        seq_printf(m, "file_dirty %llu\n",
@@ -5232,10 +5227,15 @@ static int memory_stat_show(struct seq_file *m, void *v)
 
        /* Accumulated memory events */
 
-       seq_printf(m, "pgfault %lu\n",
-                  events[MEM_CGROUP_EVENTS_PGFAULT]);
-       seq_printf(m, "pgmajfault %lu\n",
-                  events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+       seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
+       seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
+
+       seq_printf(m, "workingset_refault %lu\n",
+                  stat[MEMCG_WORKINGSET_REFAULT]);
+       seq_printf(m, "workingset_activate %lu\n",
+                  stat[MEMCG_WORKINGSET_ACTIVATE]);
+       seq_printf(m, "workingset_nodereclaim %lu\n",
+                  stat[MEMCG_WORKINGSET_NODERECLAIM]);
 
        return 0;
 }
@@ -5476,8 +5476,8 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
 
 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
                           unsigned long nr_anon, unsigned long nr_file,
-                          unsigned long nr_huge, unsigned long nr_kmem,
-                          struct page *dummy_page)
+                          unsigned long nr_kmem, unsigned long nr_huge,
+                          unsigned long nr_shmem, struct page *dummy_page)
 {
        unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
        unsigned long flags;
@@ -5495,7 +5495,8 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
        __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
        __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
        __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
-       __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
+       __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_SHMEM], nr_shmem);
+       __this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
        __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
        memcg_check_events(memcg, dummy_page);
        local_irq_restore(flags);
@@ -5507,6 +5508,7 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 static void uncharge_list(struct list_head *page_list)
 {
        struct mem_cgroup *memcg = NULL;
+       unsigned long nr_shmem = 0;
        unsigned long nr_anon = 0;
        unsigned long nr_file = 0;
        unsigned long nr_huge = 0;
@@ -5539,9 +5541,9 @@ static void uncharge_list(struct list_head *page_list)
                if (memcg != page->mem_cgroup) {
                        if (memcg) {
                                uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-                                              nr_huge, nr_kmem, page);
-                               pgpgout = nr_anon = nr_file =
-                                       nr_huge = nr_kmem = 0;
+                                              nr_kmem, nr_huge, nr_shmem, page);
+                               pgpgout = nr_anon = nr_file = nr_kmem = 0;
+                               nr_huge = nr_shmem = 0;
                        }
                        memcg = page->mem_cgroup;
                }
@@ -5555,8 +5557,11 @@ static void uncharge_list(struct list_head *page_list)
                        }
                        if (PageAnon(page))
                                nr_anon += nr_pages;
-                       else
+                       else {
                                nr_file += nr_pages;
+                               if (PageSwapBacked(page))
+                                       nr_shmem += nr_pages;
+                       }
                        pgpgout++;
                } else {
                        nr_kmem += 1 << compound_order(page);
@@ -5568,7 +5573,7 @@ static void uncharge_list(struct list_head *page_list)
 
        if (memcg)
                uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-                              nr_huge, nr_kmem, page);
+                              nr_kmem, nr_huge, nr_shmem, page);
 }
 
 /**