1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/limits.h>
31 #include <linux/mutex.h>
32 #include <linux/rbtree.h>
33 #include <linux/slab.h>
34 #include <linux/swap.h>
35 #include <linux/spinlock.h>
36 #include <linux/fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/vmalloc.h>
39 #include <linux/mm_inline.h>
40 #include <linux/page_cgroup.h>
41 #include "internal.h"
42
43 #include <asm/uaccess.h>
44
45 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
46 #define MEM_CGROUP_RECLAIM_RETRIES      5
47 struct mem_cgroup *root_mem_cgroup __read_mostly;
48
49 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
50 /* Turned on only when memory cgroup is enabled && really_do_swap_account == 1 */
51 int do_swap_account __read_mostly;
52 static int really_do_swap_account __initdata = 1; /* remembers the boot option */
53 #else
54 #define do_swap_account         (0)
55 #endif
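/*
 * Illustrative note: when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is disabled,
 * do_swap_account is the compile-time constant 0, so guards such as
 *
 *	if (do_swap_account)
 *		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
 *
 * compile away entirely and the memsw counter is never touched.
 */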
56
57 static DEFINE_MUTEX(memcg_tasklist);    /* may be held under cgroup_mutex */
58 #define SOFTLIMIT_EVENTS_THRESH (1000)
59
60 /*
61  * Statistics for memory cgroup.
62  */
63 enum mem_cgroup_stat_index {
64         /*
65          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
66          */
67         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
68         MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
69         MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
70         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
71         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
72         MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */
73         MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
74
75         MEM_CGROUP_STAT_NSTATS,
76 };
77
78 struct mem_cgroup_stat_cpu {
79         s64 count[MEM_CGROUP_STAT_NSTATS];
80 } ____cacheline_aligned_in_smp;
81
82 struct mem_cgroup_stat {
83         struct mem_cgroup_stat_cpu cpustat[0];
84 };
85
86 static inline void
87 __mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat,
88                                 enum mem_cgroup_stat_index idx)
89 {
90         stat->count[idx] = 0;
91 }
92
93 static inline s64
94 __mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
95                                 enum mem_cgroup_stat_index idx)
96 {
97         return stat->count[idx];
98 }
99
100 /*
101  * For accounting with irqs disabled, there is no need to bump the preempt count.
102  */
103 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
104                 enum mem_cgroup_stat_index idx, int val)
105 {
106         stat->count[idx] += val;
107 }
108
109 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
110                 enum mem_cgroup_stat_index idx)
111 {
112         int cpu;
113         s64 ret = 0;
114         for_each_possible_cpu(cpu)
115                 ret += stat->cpustat[cpu].count[idx];
116         return ret;
117 }
118
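/*
 * A usage sketch: mem_cgroup_read_stat() sums every possible cpu's slot,
 * so a concurrent __mem_cgroup_stat_add_safe() on another cpu can make
 * the result transiently stale, which is acceptable for statistics:
 *
 *	s64 cache = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
 *
 * The counters are s64 because one cpu's slot may go negative (charge on
 * one cpu, uncharge on another) even though the sum across cpus does not.
 */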
119 static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
120 {
121         s64 ret;
122
123         ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
124         ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
125         return ret;
126 }
127
128 /*
129  * per-zone information in memory controller.
130  */
131 struct mem_cgroup_per_zone {
132         /*
133          * spin_lock to protect the per cgroup LRU
134          */
135         struct list_head        lists[NR_LRU_LISTS];
136         unsigned long           count[NR_LRU_LISTS];
137
138         struct zone_reclaim_stat reclaim_stat;
139         struct rb_node          tree_node;      /* RB tree node */
140         unsigned long long      usage_in_excess;/* Set to the value by which */
141                                                 /* the soft limit is exceeded*/
142         bool                    on_tree;
143         struct mem_cgroup       *mem;           /* Back pointer, we cannot */
144                                                 /* use container_of        */
145 };
146 /* Macro for accessing counter */
147 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
148
149 struct mem_cgroup_per_node {
150         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
151 };
152
153 struct mem_cgroup_lru_info {
154         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
155 };
156
157 /*
158  * Cgroups above their limits are maintained in a RB-Tree, independent of
159  * their hierarchy representation
160  */
161
162 struct mem_cgroup_tree_per_zone {
163         struct rb_root rb_root;
164         spinlock_t lock;
165 };
166
167 struct mem_cgroup_tree_per_node {
168         struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
169 };
170
171 struct mem_cgroup_tree {
172         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
173 };
174
175 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
176
177 /*
178  * The memory controller data structure. The memory controller controls both
179  * page cache and RSS per cgroup. We would eventually like to provide
180  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
181  * to help the administrator determine what knobs to tune.
182  *
183  * TODO: Add a water mark for the memory controller. Reclaim will begin when
184  * we hit the water mark. Maybe even add a low water mark, such that
185  * no reclaim occurs from a cgroup at its low water mark; this is
186  * a feature that will be implemented much later in the future.
187  */
188 struct mem_cgroup {
189         struct cgroup_subsys_state css;
190         /*
191          * the counter to account for memory usage
192          */
193         struct res_counter res;
194         /*
195          * the counter to account for mem+swap usage.
196          */
197         struct res_counter memsw;
198         /*
199          * Per cgroup active and inactive list, similar to the
200          * per zone LRU lists.
201          */
202         struct mem_cgroup_lru_info info;
203
204         /*
205          * protects reclaim-related members.
206          */
207         spinlock_t reclaim_param_lock;
208
209         int     prev_priority;  /* for recording reclaim priority */
210
211         /*
212  * While reclaiming in a hierarchy, we cache the last child we
213          * reclaimed from.
214          */
215         int last_scanned_child;
216         /*
217          * Should the accounting and control be hierarchical, per subtree?
218          */
219         bool use_hierarchy;
220         unsigned long   last_oom_jiffies;
221         atomic_t        refcnt;
222
223         unsigned int    swappiness;
224
225         /* set when res.limit == memsw.limit */
226         bool            memsw_is_minimum;
227
228         /*
229          * statistics. This must be placed at the end of memcg.
230          */
231         struct mem_cgroup_stat stat;
232 };
233
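/*
 * Layout note: "stat" must stay the last member because mem_cgroup_stat
 * declares cpustat[0], a zero-length array. The allocator is presumably
 * expected to reserve sizeof(struct mem_cgroup) plus one
 * mem_cgroup_stat_cpu per possible cpu; anything placed after "stat"
 * would be overwritten by the per-cpu slots.
 */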
234 /*
235  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
236  * limit reclaim to prevent infinite loops, if they ever occur.
237  */
238 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
239 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
240
241 enum charge_type {
242         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
243         MEM_CGROUP_CHARGE_TYPE_MAPPED,
244         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
245         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
246         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
247         MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
248         NR_CHARGE_TYPE,
249 };
250
251 /* only for here (for easy reading.) */
252 #define PCGF_CACHE      (1UL << PCG_CACHE)
253 #define PCGF_USED       (1UL << PCG_USED)
254 #define PCGF_LOCK       (1UL << PCG_LOCK)
255 /* Not used, but added here for completeness */
256 #define PCGF_ACCT       (1UL << PCG_ACCT)
257
258 /* for encoding cft->private value on file */
259 #define _MEM                    (0)
260 #define _MEMSWAP                (1)
261 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
262 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
263 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
264
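/*
 * For example (illustrative): MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs
 * the counter type into the upper 16 bits and the res_counter attribute
 * into the lower 16 bits of cft->private; MEMFILE_TYPE() and
 * MEMFILE_ATTR() recover _MEMSWAP and RES_LIMIT when the control file is
 * read or written.
 */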
265 /*
266  * Reclaim flags for mem_cgroup_hierarchical_reclaim
267  */
268 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT   0x0
269 #define MEM_CGROUP_RECLAIM_NOSWAP       (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
270 #define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
271 #define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
272 #define MEM_CGROUP_RECLAIM_SOFT_BIT     0x2
273 #define MEM_CGROUP_RECLAIM_SOFT         (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
274
275 static void mem_cgroup_get(struct mem_cgroup *mem);
276 static void mem_cgroup_put(struct mem_cgroup *mem);
277 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
278
279 static struct mem_cgroup_per_zone *
280 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
281 {
282         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
283 }
284
285 static struct mem_cgroup_per_zone *
286 page_cgroup_zoneinfo(struct page_cgroup *pc)
287 {
288         struct mem_cgroup *mem = pc->mem_cgroup;
289         int nid = page_cgroup_nid(pc);
290         int zid = page_cgroup_zid(pc);
291
292         if (!mem)
293                 return NULL;
294
295         return mem_cgroup_zoneinfo(mem, nid, zid);
296 }
297
298 static struct mem_cgroup_tree_per_zone *
299 soft_limit_tree_node_zone(int nid, int zid)
300 {
301         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
302 }
303
304 static struct mem_cgroup_tree_per_zone *
305 soft_limit_tree_from_page(struct page *page)
306 {
307         int nid = page_to_nid(page);
308         int zid = page_zonenum(page);
309
310         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
311 }
312
313 static void
314 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
315                                 struct mem_cgroup_per_zone *mz,
316                                 struct mem_cgroup_tree_per_zone *mctz,
317                                 unsigned long long new_usage_in_excess)
318 {
319         struct rb_node **p = &mctz->rb_root.rb_node;
320         struct rb_node *parent = NULL;
321         struct mem_cgroup_per_zone *mz_node;
322
323         if (mz->on_tree)
324                 return;
325
326         mz->usage_in_excess = new_usage_in_excess;
327         if (!mz->usage_in_excess)
328                 return;
329         while (*p) {
330                 parent = *p;
331                 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
332                                         tree_node);
333                 if (mz->usage_in_excess < mz_node->usage_in_excess)
334                         p = &(*p)->rb_left;
335                 /*
336                  * We can't avoid mem cgroups that are over their soft
337                  * limit by the same amount
338                  */
339                 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
340                         p = &(*p)->rb_right;
341         }
342         rb_link_node(&mz->tree_node, parent, p);
343         rb_insert_color(&mz->tree_node, &mctz->rb_root);
344         mz->on_tree = true;
345 }
346
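/*
 * The tree is keyed by usage_in_excess; equal keys are linked to the
 * right, so rb_last() always yields the zone's worst offender. A sketch
 * of the invariant:
 *
 *	left subtree < node->usage_in_excess <= right subtree
 *
 * which __mem_cgroup_largest_soft_limit_node() below relies on.
 */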
347 static void
348 __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
349                                 struct mem_cgroup_per_zone *mz,
350                                 struct mem_cgroup_tree_per_zone *mctz)
351 {
352         if (!mz->on_tree)
353                 return;
354         rb_erase(&mz->tree_node, &mctz->rb_root);
355         mz->on_tree = false;
356 }
357
358 static void
359 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
360                                 struct mem_cgroup_per_zone *mz,
361                                 struct mem_cgroup_tree_per_zone *mctz)
362 {
363         spin_lock(&mctz->lock);
364         __mem_cgroup_remove_exceeded(mem, mz, mctz);
365         spin_unlock(&mctz->lock);
366 }
367
368 static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
369 {
370         bool ret = false;
371         int cpu;
372         s64 val;
373         struct mem_cgroup_stat_cpu *cpustat;
374
375         cpu = get_cpu();
376         cpustat = &mem->stat.cpustat[cpu];
377         val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS);
378         if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) {
379                 __mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS);
380                 ret = true;
381         }
382         put_cpu();
383         return ret;
384 }
385
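/*
 * In effect, mem_cgroup_soft_limit_check() ratelimits soft-limit tree
 * maintenance: the per-cpu MEM_CGROUP_STAT_EVENTS slot counts
 * pagein+pageout, and only after a cpu accumulates more than
 * SOFTLIMIT_EVENTS_THRESH (1000) events does the caller pay for
 * mem_cgroup_update_tree(); the slot is then reset so the next check
 * starts from zero.
 */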
386 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
387 {
388         unsigned long long excess;
389         struct mem_cgroup_per_zone *mz;
390         struct mem_cgroup_tree_per_zone *mctz;
391         int nid = page_to_nid(page);
392         int zid = page_zonenum(page);
393         mctz = soft_limit_tree_from_page(page);
394
395         /*
396          * Necessary to update all ancestors when hierarchy is used,
397          * because their event counter is not touched.
398          */
399         for (; mem; mem = parent_mem_cgroup(mem)) {
400                 mz = mem_cgroup_zoneinfo(mem, nid, zid);
401                 excess = res_counter_soft_limit_excess(&mem->res);
402                 /*
403                  * We have to update the tree if mz is on RB-tree or
404                  * mem is over its softlimit.
405                  */
406                 if (excess || mz->on_tree) {
407                         spin_lock(&mctz->lock);
408                         /* if on-tree, remove it */
409                         if (mz->on_tree)
410                                 __mem_cgroup_remove_exceeded(mem, mz, mctz);
411                         /*
412                          * Insert again. mz->usage_in_excess will be updated.
413                          * If excess is 0, no tree ops.
414                          */
415                         __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
416                         spin_unlock(&mctz->lock);
417                 }
418         }
419 }
420
421 static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
422 {
423         int node, zone;
424         struct mem_cgroup_per_zone *mz;
425         struct mem_cgroup_tree_per_zone *mctz;
426
427         for_each_node_state(node, N_POSSIBLE) {
428                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
429                         mz = mem_cgroup_zoneinfo(mem, node, zone);
430                         mctz = soft_limit_tree_node_zone(node, zone);
431                         mem_cgroup_remove_exceeded(mem, mz, mctz);
432                 }
433         }
434 }
435
436 static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
437 {
438         return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
439 }
440
441 static struct mem_cgroup_per_zone *
442 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
443 {
444         struct rb_node *rightmost = NULL;
445         struct mem_cgroup_per_zone *mz;
446
447 retry:
448         mz = NULL;
449         rightmost = rb_last(&mctz->rb_root);
450         if (!rightmost)
451                 goto done;              /* Nothing to reclaim from */
452
453         mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
454         /*
455          * Remove the node now but someone else can add it back,
456          * we will add it back at the end of reclaim to its correct
457          * position in the tree.
458          */
459         __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
460         if (!res_counter_soft_limit_excess(&mz->mem->res) ||
461                 !css_tryget(&mz->mem->css))
462                 goto retry;
463 done:
464         return mz;
465 }
466
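/*
 * Note the retry in __mem_cgroup_largest_soft_limit_node(): the rightmost
 * node is unlinked, but it is skipped if its excess has meanwhile dropped
 * to zero or if css_tryget() fails because the cgroup is being destroyed;
 * we then fall back to the next-largest node.
 */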
467 static struct mem_cgroup_per_zone *
468 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
469 {
470         struct mem_cgroup_per_zone *mz;
471
472         spin_lock(&mctz->lock);
473         mz = __mem_cgroup_largest_soft_limit_node(mctz);
474         spin_unlock(&mctz->lock);
475         return mz;
476 }
477
478 static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
479                                          bool charge)
480 {
481         int val = (charge) ? 1 : -1;
482         struct mem_cgroup_stat *stat = &mem->stat;
483         struct mem_cgroup_stat_cpu *cpustat;
484         int cpu = get_cpu();
485
486         cpustat = &stat->cpustat[cpu];
487         __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SWAPOUT, val);
488         put_cpu();
489 }
490
491 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
492                                          struct page_cgroup *pc,
493                                          bool charge)
494 {
495         int val = (charge) ? 1 : -1;
496         struct mem_cgroup_stat *stat = &mem->stat;
497         struct mem_cgroup_stat_cpu *cpustat;
498         int cpu = get_cpu();
499
500         cpustat = &stat->cpustat[cpu];
501         if (PageCgroupCache(pc))
502                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
503         else
504                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
505
506         if (charge)
507                 __mem_cgroup_stat_add_safe(cpustat,
508                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
509         else
510                 __mem_cgroup_stat_add_safe(cpustat,
511                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
512         __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_EVENTS, 1);
513         put_cpu();
514 }
515
516 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
517                                         enum lru_list idx)
518 {
519         int nid, zid;
520         struct mem_cgroup_per_zone *mz;
521         u64 total = 0;
522
523         for_each_online_node(nid)
524                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
525                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
526                         total += MEM_CGROUP_ZSTAT(mz, idx);
527                 }
528         return total;
529 }
530
531 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
532 {
533         return container_of(cgroup_subsys_state(cont,
534                                 mem_cgroup_subsys_id), struct mem_cgroup,
535                                 css);
536 }
537
538 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
539 {
540         /*
541          * mm_update_next_owner() may clear mm->owner to NULL
542          * if it races with swapoff, page migration, etc.
543          * So this can be called with p == NULL.
544          */
545         if (unlikely(!p))
546                 return NULL;
547
548         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
549                                 struct mem_cgroup, css);
550 }
551
552 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
553 {
554         struct mem_cgroup *mem = NULL;
555
556         if (!mm)
557                 return NULL;
558         /*
559          * Because we have no locks, mm->owner may be being moved to another
560          * cgroup. We use css_tryget() here even if this looks
561          * pessimistic (rather than adding locks here).
562          */
563         rcu_read_lock();
564         do {
565                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
566                 if (unlikely(!mem))
567                         break;
568         } while (!css_tryget(&mem->css));
569         rcu_read_unlock();
570         return mem;
571 }
572
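/*
 * Usage sketch: on success, try_get_mem_cgroup_from_mm() returns a memcg
 * with a css reference pinned by css_tryget(), which the caller must drop:
 *
 *	mem = try_get_mem_cgroup_from_mm(mm);
 *	if (mem) {
 *		... use mem ...
 *		css_put(&mem->css);
 *	}
 */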
573 /*
574  * Call the callback function for every cgroup under the hierarchy tree.
575  */
576 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
577                           int (*func)(struct mem_cgroup *, void *))
578 {
579         int found, ret, nextid;
580         struct cgroup_subsys_state *css;
581         struct mem_cgroup *mem;
582
583         if (!root->use_hierarchy)
584                 return (*func)(root, data);
585
586         nextid = 1;
587         do {
588                 ret = 0;
589                 mem = NULL;
590
591                 rcu_read_lock();
592                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
593                                    &found);
594                 if (css && css_tryget(css))
595                         mem = container_of(css, struct mem_cgroup, css);
596                 rcu_read_unlock();
597
598                 if (mem) {
599                         ret = (*func)(mem, data);
600                         css_put(&mem->css);
601                 }
602                 nextid = found + 1;
603         } while (!ret && css);
604
605         return ret;
606 }
607
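/*
 * Usage sketch: a callback returning nonzero stops the walk early. For a
 * plain traversal see mem_cgroup_count_children() below, which passes
 * mem_cgroup_count_children_cb() and an int pointer as the data cookie.
 */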
608 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
609 {
610         return (mem == root_mem_cgroup);
611 }
612
613 /*
614  * Following LRU functions are allowed to be used without PCG_LOCK.
615  * Operations are called by the global LRU routines independently of memcg.
616  * What we have to take care of here is the validity of pc->mem_cgroup.
617  *
618  * Changes to pc->mem_cgroup happen on
619  * 1. charge
620  * 2. moving account
621  * In the typical case, "charge" is done before add-to-LRU. The exception is
622  * SwapCache, which is added to the LRU before being charged.
623  * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
624  * When moving account, the page is not on the LRU; it's isolated.
625  */
626
627 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
628 {
629         struct page_cgroup *pc;
630         struct mem_cgroup_per_zone *mz;
631
632         if (mem_cgroup_disabled())
633                 return;
634         pc = lookup_page_cgroup(page);
635         /* can happen while we handle swapcache. */
636         if (!TestClearPageCgroupAcctLRU(pc))
637                 return;
638         VM_BUG_ON(!pc->mem_cgroup);
639         /*
640          * We don't check PCG_USED bit. It's cleared when the "page" is finally
641          * removed from global LRU.
642          */
643         mz = page_cgroup_zoneinfo(pc);
644         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
645         if (mem_cgroup_is_root(pc->mem_cgroup))
646                 return;
647         VM_BUG_ON(list_empty(&pc->lru));
648         list_del_init(&pc->lru);
649         return;
650 }
651
652 void mem_cgroup_del_lru(struct page *page)
653 {
654         mem_cgroup_del_lru_list(page, page_lru(page));
655 }
656
657 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
658 {
659         struct mem_cgroup_per_zone *mz;
660         struct page_cgroup *pc;
661
662         if (mem_cgroup_disabled())
663                 return;
664
665         pc = lookup_page_cgroup(page);
666         /*
667          * Used bit is set without atomic ops but after smp_wmb().
668          * For making pc->mem_cgroup visible, insert smp_rmb() here.
669          */
670         smp_rmb();
671         /* unused or root page is not rotated. */
672         if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
673                 return;
674         mz = page_cgroup_zoneinfo(pc);
675         list_move(&pc->lru, &mz->lists[lru]);
676 }
677
678 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
679 {
680         struct page_cgroup *pc;
681         struct mem_cgroup_per_zone *mz;
682
683         if (mem_cgroup_disabled())
684                 return;
685         pc = lookup_page_cgroup(page);
686         VM_BUG_ON(PageCgroupAcctLRU(pc));
687         /*
688          * Used bit is set without atomic ops but after smp_wmb().
689          * For making pc->mem_cgroup visible, insert smp_rmb() here.
690          */
691         smp_rmb();
692         if (!PageCgroupUsed(pc))
693                 return;
694
695         mz = page_cgroup_zoneinfo(pc);
696         MEM_CGROUP_ZSTAT(mz, lru) += 1;
697         SetPageCgroupAcctLRU(pc);
698         if (mem_cgroup_is_root(pc->mem_cgroup))
699                 return;
700         list_add(&pc->lru, &mz->lists[lru]);
701 }
702
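/*
 * Note the asymmetry for the root cgroup in the helpers above: pages
 * charged to root are counted in MEM_CGROUP_ZSTAT but are never linked
 * on the private per-memcg lists, so del/add return before touching
 * pc->lru when mem_cgroup_is_root() is true.
 */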
703 /*
704  * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
705  * the LRU, because the page may be reused after it's fully uncharged (because
706  * of SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
707  * when charging it again. This function is only used to charge SwapCache. It's
708  * done under lock_page(), and it is expected that zone->lru_lock is never held.
709  */
710 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
711 {
712         unsigned long flags;
713         struct zone *zone = page_zone(page);
714         struct page_cgroup *pc = lookup_page_cgroup(page);
715
716         spin_lock_irqsave(&zone->lru_lock, flags);
717         /*
718          * Forget old LRU when this page_cgroup is *not* used. This Used bit
719          * is guarded by lock_page() because the page is SwapCache.
720          */
721         if (!PageCgroupUsed(pc))
722                 mem_cgroup_del_lru_list(page, page_lru(page));
723         spin_unlock_irqrestore(&zone->lru_lock, flags);
724 }
725
726 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
727 {
728         unsigned long flags;
729         struct zone *zone = page_zone(page);
730         struct page_cgroup *pc = lookup_page_cgroup(page);
731
732         spin_lock_irqsave(&zone->lru_lock, flags);
733         /* link when the page is linked to LRU but page_cgroup isn't */
734         if (PageLRU(page) && !PageCgroupAcctLRU(pc))
735                 mem_cgroup_add_lru_list(page, page_lru(page));
736         spin_unlock_irqrestore(&zone->lru_lock, flags);
737 }
738
739
740 void mem_cgroup_move_lists(struct page *page,
741                            enum lru_list from, enum lru_list to)
742 {
743         if (mem_cgroup_disabled())
744                 return;
745         mem_cgroup_del_lru_list(page, from);
746         mem_cgroup_add_lru_list(page, to);
747 }
748
749 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
750 {
751         int ret;
752         struct mem_cgroup *curr = NULL;
753
754         task_lock(task);
755         rcu_read_lock();
756         curr = try_get_mem_cgroup_from_mm(task->mm);
757         rcu_read_unlock();
758         task_unlock(task);
759         if (!curr)
760                 return 0;
761         /*
762          * We should check use_hierarchy of "mem", not "curr". Checking
763          * use_hierarchy of "curr" here would make this function return true
764          * if hierarchy is enabled in "curr" and "curr" is a child of "mem"
765          * in the *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
766          */
767         if (mem->use_hierarchy)
768                 ret = css_is_ancestor(&curr->css, &mem->css);
769         else
770                 ret = (curr == mem);
771         css_put(&curr->css);
772         return ret;
773 }
774
775 /*
776  * prev_priority control... this is used in the memory reclaim path.
777  */
778 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
779 {
780         int prev_priority;
781
782         spin_lock(&mem->reclaim_param_lock);
783         prev_priority = mem->prev_priority;
784         spin_unlock(&mem->reclaim_param_lock);
785
786         return prev_priority;
787 }
788
789 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
790 {
791         spin_lock(&mem->reclaim_param_lock);
792         if (priority < mem->prev_priority)
793                 mem->prev_priority = priority;
794         spin_unlock(&mem->reclaim_param_lock);
795 }
796
797 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
798 {
799         spin_lock(&mem->reclaim_param_lock);
800         mem->prev_priority = priority;
801         spin_unlock(&mem->reclaim_param_lock);
802 }
803
804 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
805 {
806         unsigned long active;
807         unsigned long inactive;
808         unsigned long gb;
809         unsigned long inactive_ratio;
810
811         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
812         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
813
814         gb = (inactive + active) >> (30 - PAGE_SHIFT);
815         if (gb)
816                 inactive_ratio = int_sqrt(10 * gb);
817         else
818                 inactive_ratio = 1;
819
820         if (present_pages) {
821                 present_pages[0] = inactive;
822                 present_pages[1] = active;
823         }
824
825         return inactive_ratio;
826 }
827
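/*
 * A worked example for calc_inactive_ratio(): with 4GB of anon pages
 * (inactive + active), gb = 4 and inactive_ratio = int_sqrt(40) = 6, so
 * inactive anon is considered low once inactive * 6 < active; below 1GB
 * the ratio degenerates to 1, i.e. a plain inactive < active comparison.
 */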
828 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
829 {
830         unsigned long active;
831         unsigned long inactive;
832         unsigned long present_pages[2];
833         unsigned long inactive_ratio;
834
835         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
836
837         inactive = present_pages[0];
838         active = present_pages[1];
839
840         if (inactive * inactive_ratio < active)
841                 return 1;
842
843         return 0;
844 }
845
846 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
847 {
848         unsigned long active;
849         unsigned long inactive;
850
851         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
852         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
853
854         return (active > inactive);
855 }
856
857 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
858                                        struct zone *zone,
859                                        enum lru_list lru)
860 {
861         int nid = zone->zone_pgdat->node_id;
862         int zid = zone_idx(zone);
863         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
864
865         return MEM_CGROUP_ZSTAT(mz, lru);
866 }
867
868 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
869                                                       struct zone *zone)
870 {
871         int nid = zone->zone_pgdat->node_id;
872         int zid = zone_idx(zone);
873         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
874
875         return &mz->reclaim_stat;
876 }
877
878 struct zone_reclaim_stat *
879 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
880 {
881         struct page_cgroup *pc;
882         struct mem_cgroup_per_zone *mz;
883
884         if (mem_cgroup_disabled())
885                 return NULL;
886
887         pc = lookup_page_cgroup(page);
888         /*
889          * Used bit is set without atomic ops but after smp_wmb().
890          * For making pc->mem_cgroup visible, insert smp_rmb() here.
891          */
892         smp_rmb();
893         if (!PageCgroupUsed(pc))
894                 return NULL;
895
896         mz = page_cgroup_zoneinfo(pc);
897         if (!mz)
898                 return NULL;
899
900         return &mz->reclaim_stat;
901 }
902
903 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
904                                         struct list_head *dst,
905                                         unsigned long *scanned, int order,
906                                         int mode, struct zone *z,
907                                         struct mem_cgroup *mem_cont,
908                                         int active, int file)
909 {
910         unsigned long nr_taken = 0;
911         struct page *page;
912         unsigned long scan;
913         LIST_HEAD(pc_list);
914         struct list_head *src;
915         struct page_cgroup *pc, *tmp;
916         int nid = z->zone_pgdat->node_id;
917         int zid = zone_idx(z);
918         struct mem_cgroup_per_zone *mz;
919         int lru = LRU_FILE * file + active;
920         int ret;
921
922         BUG_ON(!mem_cont);
923         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
924         src = &mz->lists[lru];
925
926         scan = 0;
927         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
928                 if (scan >= nr_to_scan)
929                         break;
930
931                 page = pc->page;
932                 if (unlikely(!PageCgroupUsed(pc)))
933                         continue;
934                 if (unlikely(!PageLRU(page)))
935                         continue;
936
937                 scan++;
938                 ret = __isolate_lru_page(page, mode, file);
939                 switch (ret) {
940                 case 0:
941                         list_move(&page->lru, dst);
942                         mem_cgroup_del_lru(page);
943                         nr_taken++;
944                         break;
945                 case -EBUSY:
946                         /* we don't affect global LRU but rotate in our LRU */
947                         mem_cgroup_rotate_lru_list(page, page_lru(page));
948                         break;
949                 default:
950                         break;
951                 }
952         }
953
954         *scanned = scan;
955         return nr_taken;
956 }
957
958 #define mem_cgroup_from_res_counter(counter, member)    \
959         container_of(counter, struct mem_cgroup, member)
960
961 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
962 {
963         if (do_swap_account) {
964                 if (res_counter_check_under_limit(&mem->res) &&
965                         res_counter_check_under_limit(&mem->memsw))
966                         return true;
967         } else
968                 if (res_counter_check_under_limit(&mem->res))
969                         return true;
970         return false;
971 }
972
973 static unsigned int get_swappiness(struct mem_cgroup *memcg)
974 {
975         struct cgroup *cgrp = memcg->css.cgroup;
976         unsigned int swappiness;
977
978         /* root ? */
979         if (cgrp->parent == NULL)
980                 return vm_swappiness;
981
982         spin_lock(&memcg->reclaim_param_lock);
983         swappiness = memcg->swappiness;
984         spin_unlock(&memcg->reclaim_param_lock);
985
986         return swappiness;
987 }
988
989 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
990 {
991         int *val = data;
992         (*val)++;
993         return 0;
994 }
995
996 /**
997  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
998  * @memcg: The memory cgroup that went over limit
999  * @p: Task that is going to be killed
1000  *
1001  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1002  * enabled
1003  */
1004 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1005 {
1006         struct cgroup *task_cgrp;
1007         struct cgroup *mem_cgrp;
1008         /*
1009          * Need a buffer in BSS, can't rely on allocations. The code relies
1010          * on the assumption that OOM is serialized for the memory controller.
1011          * If this assumption is broken, revisit this code.
1012          */
1013         static char memcg_name[PATH_MAX];
1014         int ret;
1015
1016         if (!memcg)
1017                 return;
1018
1019
1020         rcu_read_lock();
1021
1022         mem_cgrp = memcg->css.cgroup;
1023         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1024
1025         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1026         if (ret < 0) {
1027                 /*
1028                  * Unfortunately, we are unable to convert to a useful name,
1029                  * but we'll still print out the usage information.
1030                  */
1031                 rcu_read_unlock();
1032                 goto done;
1033         }
1034         rcu_read_unlock();
1035
1036         printk(KERN_INFO "Task in %s killed", memcg_name);
1037
1038         rcu_read_lock();
1039         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1040         if (ret < 0) {
1041                 rcu_read_unlock();
1042                 goto done;
1043         }
1044         rcu_read_unlock();
1045
1046         /*
1047          * Continues from above, so we don't need a KERN_ level
1048          */
1049         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1050 done:
1051
1052         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1053                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1054                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1055                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1056         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1057                 "failcnt %llu\n",
1058                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1059                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1060                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1061 }
1062
1063 /*
1064  * This function returns the number of memcgs under the hierarchy tree.
1065  * Returns 1 (self count) if there are no children.
1066  */
1067 static int mem_cgroup_count_children(struct mem_cgroup *mem)
1068 {
1069         int num = 0;
1070         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
1071         return num;
1072 }
1073
1074 /*
1075  * Visit the first child (need not be the first child as per the ordering
1076  * of the cgroup list, since we track last_scanned_child) of @mem and use
1077  * that to reclaim free pages from.
1078  */
1079 static struct mem_cgroup *
1080 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1081 {
1082         struct mem_cgroup *ret = NULL;
1083         struct cgroup_subsys_state *css;
1084         int nextid, found;
1085
1086         if (!root_mem->use_hierarchy) {
1087                 css_get(&root_mem->css);
1088                 ret = root_mem;
1089         }
1090
1091         while (!ret) {
1092                 rcu_read_lock();
1093                 nextid = root_mem->last_scanned_child + 1;
1094                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1095                                    &found);
1096                 if (css && css_tryget(css))
1097                         ret = container_of(css, struct mem_cgroup, css);
1098
1099                 rcu_read_unlock();
1100                 /* Updates scanning parameter */
1101                 spin_lock(&root_mem->reclaim_param_lock);
1102                 if (!css) {
1103                         /* this means start scan from ID:1 */
1104                         root_mem->last_scanned_child = 0;
1105                 } else
1106                         root_mem->last_scanned_child = found;
1107                 spin_unlock(&root_mem->reclaim_param_lock);
1108         }
1109
1110         return ret;
1111 }
1112
1113 /*
1114  * Scan the hierarchy if needed to reclaim memory. We remember the last child
1115  * we reclaimed from, so that we don't end up penalizing one child extensively
1116  * based on its position in the children list.
1117  *
1118  * root_mem is the original ancestor that we've been reclaiming from.
1119  *
1120  * We give up and return to the caller when we visit root_mem twice.
1121  * (other groups can be removed while we're walking....)
1122  *
1123  * If shrink==true, to avoid freeing too much, this returns immediately.
1124  */
1125 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1126                                                 struct zone *zone,
1127                                                 gfp_t gfp_mask,
1128                                                 unsigned long reclaim_options)
1129 {
1130         struct mem_cgroup *victim;
1131         int ret, total = 0;
1132         int loop = 0;
1133         bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1134         bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1135         bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1136         unsigned long excess = mem_cgroup_get_excess(root_mem);
1137
1138         /* If memsw_is_minimum==1, swap-out is of no use. */
1139         if (root_mem->memsw_is_minimum)
1140                 noswap = true;
1141
1142         while (1) {
1143                 victim = mem_cgroup_select_victim(root_mem);
1144                 if (victim == root_mem) {
1145                         loop++;
1146                         if (loop >= 2) {
1147                                 /*
1148                                  * If we have not been able to reclaim
1149                                  * anything, it might be because there are
1150                                  * no reclaimable pages under this hierarchy.
1151                                  */
1152                                 if (!check_soft || !total) {
1153                                         css_put(&victim->css);
1154                                         break;
1155                                 }
1156                                 /*
1157                                  * We want to do more targeted reclaim.
1158                                  * excess >> 2 is not too excessive, so we do
1159                                  * not reclaim too much, nor so little that we
1160                                  * keep coming back to reclaim from this cgroup.
1161                                  */
1162                                 if (total >= (excess >> 2) ||
1163                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1164                                         css_put(&victim->css);
1165                                         break;
1166                                 }
1167                         }
1168                 }
1169                 if (!mem_cgroup_local_usage(&victim->stat)) {
1170                         /* this cgroup's local usage == 0 */
1171                         css_put(&victim->css);
1172                         continue;
1173                 }
1174                 /* we use swappiness of local cgroup */
1175                 if (check_soft)
1176                         ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1177                                 noswap, get_swappiness(victim), zone,
1178                                 zone->zone_pgdat->node_id);
1179                 else
1180                         ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1181                                                 noswap, get_swappiness(victim));
1182                 css_put(&victim->css);
1183                 /*
1184                  * When shrinking usage, we can't check whether we should stop
1185                  * here or reclaim more; that depends on the caller.
1186                  * last_scanned_child is enough to keep fairness under the tree.
1187                  */
1188                 if (shrink)
1189                         return ret;
1190                 total += ret;
1191                 if (check_soft) {
1192                         if (res_counter_check_under_soft_limit(&root_mem->res))
1193                                 return total;
1194                 } else if (mem_cgroup_check_under_limit(root_mem))
1195                         return 1 + total;
1196         }
1197         return total;
1198 }
1199
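/*
 * A usage sketch for mem_cgroup_hierarchical_reclaim() (illustrative flag
 * combination): a limit-resizing path might request
 *
 *	ret = mem_cgroup_hierarchical_reclaim(mem, NULL, GFP_KERNEL,
 *					MEM_CGROUP_RECLAIM_NOSWAP |
 *					MEM_CGROUP_RECLAIM_SHRINK);
 *
 * With check_soft the return value is the total number of pages reclaimed;
 * otherwise a positive value means the group dropped back under its limit,
 * and with shrink the result of the first victim is returned immediately.
 */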
1200 bool mem_cgroup_oom_called(struct task_struct *task)
1201 {
1202         bool ret = false;
1203         struct mem_cgroup *mem;
1204         struct mm_struct *mm;
1205
1206         rcu_read_lock();
1207         mm = task->mm;
1208         if (!mm)
1209                 mm = &init_mm;
1210         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
1211         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
1212                 ret = true;
1213         rcu_read_unlock();
1214         return ret;
1215 }
1216
1217 static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
1218 {
1219         mem->last_oom_jiffies = jiffies;
1220         return 0;
1221 }
1222
1223 static void record_last_oom(struct mem_cgroup *mem)
1224 {
1225         mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
1226 }
1227
1228 /*
1229  * Currently used to update mapped file statistics, but the routine can be
1230  * generalized to update other statistics as well.
1231  */
1232 void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
1233 {
1234         struct mem_cgroup *mem;
1235         struct mem_cgroup_stat *stat;
1236         struct mem_cgroup_stat_cpu *cpustat;
1237         int cpu;
1238         struct page_cgroup *pc;
1239
1240         if (!page_is_file_cache(page))
1241                 return;
1242
1243         pc = lookup_page_cgroup(page);
1244         if (unlikely(!pc))
1245                 return;
1246
1247         lock_page_cgroup(pc);
1248         mem = pc->mem_cgroup;
1249         if (!mem)
1250                 goto done;
1251
1252         if (!PageCgroupUsed(pc))
1253                 goto done;
1254
1255         /*
1256          * Preemption is already disabled, we don't need get_cpu()
1257          */
1258         cpu = smp_processor_id();
1259         stat = &mem->stat;
1260         cpustat = &stat->cpustat[cpu];
1261
1262         __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
1263 done:
1264         unlock_page_cgroup(pc);
1265 }
1266
1267 /*
1268  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
1269  * the OOM killer can be invoked.
1270  */
1271 static int __mem_cgroup_try_charge(struct mm_struct *mm,
1272                         gfp_t gfp_mask, struct mem_cgroup **memcg,
1273                         bool oom, struct page *page)
1274 {
1275         struct mem_cgroup *mem, *mem_over_limit;
1276         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1277         struct res_counter *fail_res;
1278
1279         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
1280                 /* Don't account this! */
1281                 *memcg = NULL;
1282                 return 0;
1283         }
1284
1285         /*
1286          * We always charge the cgroup the mm_struct belongs to.
1287          * The mm_struct's mem_cgroup changes on task migration if the
1288          * thread group leader migrates. It's possible that mm is not
1289          * set, if so charge the init_mm (happens for pagecache usage).
1290          */
1291         mem = *memcg;
1292         if (likely(!mem)) {
1293                 mem = try_get_mem_cgroup_from_mm(mm);
1294                 *memcg = mem;
1295         } else {
1296                 css_get(&mem->css);
1297         }
1298         if (unlikely(!mem))
1299                 return 0;
1300
1301         VM_BUG_ON(css_is_removed(&mem->css));
1302
1303         while (1) {
1304                 int ret = 0;
1305                 unsigned long flags = 0;
1306
1307                 if (mem_cgroup_is_root(mem))
1308                         goto done;
1309                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
1310                 if (likely(!ret)) {
1311                         if (!do_swap_account)
1312                                 break;
1313                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
1314                                                         &fail_res);
1315                         if (likely(!ret))
1316                                 break;
1317                         /* mem+swap counter fails */
1318                         res_counter_uncharge(&mem->res, PAGE_SIZE);
1319                         flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1320                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1321                                                                         memsw);
1322                 } else
1323                         /* mem counter fails */
1324                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1325                                                                         res);
1326
1327                 if (!(gfp_mask & __GFP_WAIT))
1328                         goto nomem;
1329
1330                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1331                                                 gfp_mask, flags);
1332                 if (ret)
1333                         continue;
1334
1335                 /*
1336                  * try_to_free_mem_cgroup_pages() might not give us a full
1337                  * picture of reclaim. Some pages are reclaimed and might be
1338                  * moved to swap cache or just unmapped from the cgroup.
1339                  * Check the limit again to see if the reclaim reduced the
1340                  * current usage of the cgroup before giving up
1341                  *
1342                  */
1343                 if (mem_cgroup_check_under_limit(mem_over_limit))
1344                         continue;
1345
1346                 if (!nr_retries--) {
1347                         if (oom) {
1348                                 mutex_lock(&memcg_tasklist);
1349                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
1350                                 mutex_unlock(&memcg_tasklist);
1351                                 record_last_oom(mem_over_limit);
1352                         }
1353                         goto nomem;
1354                 }
1355         }
1356         /*
1357          * Insert ancestors (and their ancestors) into the soft limit RB-tree
1358          * if they exceed their soft limit.
1359          */
1360         if (mem_cgroup_soft_limit_check(mem))
1361                 mem_cgroup_update_tree(mem, page);
1362 done:
1363         return 0;
1364 nomem:
1365         css_put(&mem->css);
1366         return -ENOMEM;
1367 }
1368
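/*
 * Note the charge ordering in __mem_cgroup_try_charge(): res is charged
 * before memsw, and when the memsw charge fails, res is rolled back and
 * reclaim runs with MEM_CGROUP_RECLAIM_NOSWAP, since swapping pages out
 * cannot reduce mem+swap usage.
 */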
1369 /*
1370  * A helper function to get a mem_cgroup from an ID. Must be called under
1371  * rcu_read_lock(). The caller must check css_is_removed() or similar if
1372  * that is a concern (dropping a refcnt from swap can happen against a
1373  * removed memcg).
1374  */
1375 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1376 {
1377         struct cgroup_subsys_state *css;
1378
1379         /* ID 0 is unused ID */
1380         if (!id)
1381                 return NULL;
1382         css = css_lookup(&mem_cgroup_subsys, id);
1383         if (!css)
1384                 return NULL;
1385         return container_of(css, struct mem_cgroup, css);
1386 }
1387
1388 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
1389 {
1390         struct mem_cgroup *mem;
1391         struct page_cgroup *pc;
1392         unsigned short id;
1393         swp_entry_t ent;
1394
1395         VM_BUG_ON(!PageLocked(page));
1396
1397         if (!PageSwapCache(page))
1398                 return NULL;
1399
1400         pc = lookup_page_cgroup(page);
1401         lock_page_cgroup(pc);
1402         if (PageCgroupUsed(pc)) {
1403                 mem = pc->mem_cgroup;
1404                 if (mem && !css_tryget(&mem->css))
1405                         mem = NULL;
1406         } else {
1407                 ent.val = page_private(page);
1408                 id = lookup_swap_cgroup(ent);
1409                 rcu_read_lock();
1410                 mem = mem_cgroup_lookup(id);
1411                 if (mem && !css_tryget(&mem->css))
1412                         mem = NULL;
1413                 rcu_read_unlock();
1414         }
1415         unlock_page_cgroup(pc);
1416         return mem;
1417 }
1418
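/*
 * Two lookup paths above: a live (PCG_USED) page_cgroup names its memcg
 * directly, while an uncharged swapcache page is resolved through its swap
 * entry via lookup_swap_cgroup(), whose ID is mapped back to a css with
 * mem_cgroup_lookup(); both paths pin the result with css_tryget().
 */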
1419 /*
1420  * Commit a charge obtained via __mem_cgroup_try_charge() and move page_cgroup
1421  * to the USED state. If it is already USED, uncharge and return.
1422  */
1423
1424 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1425                                      struct page_cgroup *pc,
1426                                      enum charge_type ctype)
1427 {
1428         /* try_charge() can return NULL in *memcg; handle that here. */
1429         if (!mem)
1430                 return;
1431
1432         lock_page_cgroup(pc);
1433         if (unlikely(PageCgroupUsed(pc))) {
1434                 unlock_page_cgroup(pc);
1435                 if (!mem_cgroup_is_root(mem)) {
1436                         res_counter_uncharge(&mem->res, PAGE_SIZE);
1437                         if (do_swap_account)
1438                                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1439                 }
1440                 css_put(&mem->css);
1441                 return;
1442         }
1443
1444         pc->mem_cgroup = mem;
1445         /*
1446          * We access a page_cgroup asynchronously without lock_page_cgroup().
1447          * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
1448          * is accessed after testing USED bit. To make pc->mem_cgroup visible
1449          * before USED bit, we need memory barrier here.
1450          * See mem_cgroup_add_lru_list(), etc.
1451          */
1452         smp_wmb();
1453         switch (ctype) {
1454         case MEM_CGROUP_CHARGE_TYPE_CACHE:
1455         case MEM_CGROUP_CHARGE_TYPE_SHMEM:
1456                 SetPageCgroupCache(pc);
1457                 SetPageCgroupUsed(pc);
1458                 break;
1459         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1460                 ClearPageCgroupCache(pc);
1461                 SetPageCgroupUsed(pc);
1462                 break;
1463         default:
1464                 break;
1465         }
1466
1467         mem_cgroup_charge_statistics(mem, pc, true);
1468
1469         unlock_page_cgroup(pc);
1470 }
1471
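/*
 * The smp_wmb() in __mem_cgroup_commit_charge() pairs with the smp_rmb()
 * in the lockless LRU helpers (e.g. mem_cgroup_add_lru_list()): a reader
 * that observes the USED bit is then guaranteed to observe the
 * pc->mem_cgroup store that preceded it.
 */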
1472 /**
1473  * mem_cgroup_move_account - move account of the page
1474  * @pc: page_cgroup of the page.
1475  * @from: mem_cgroup which the page is moved from.
1476  * @to: mem_cgroup which the page is moved to. @from != @to.
1477  *
1478  * The caller must confirm following.
1479  * - page is not on LRU (isolate_page() is useful.)
1480  *
1481  * returns 0 at success,
1482  * returns -EBUSY when lock is busy or "pc" is unstable.
1483  *
1484  * This function does "uncharge" from old cgroup but doesn't do "charge" to
1485  * new cgroup. It should be done by a caller.
1486  */
1487
1488 static int mem_cgroup_move_account(struct page_cgroup *pc,
1489         struct mem_cgroup *from, struct mem_cgroup *to)
1490 {
1491         struct mem_cgroup_per_zone *from_mz, *to_mz;
1492         int nid, zid;
1493         int ret = -EBUSY;
1494         struct page *page;
1495         int cpu;
1496         struct mem_cgroup_stat *stat;
1497         struct mem_cgroup_stat_cpu *cpustat;
1498
1499         VM_BUG_ON(from == to);
1500         VM_BUG_ON(PageLRU(pc->page));
1501
1502         nid = page_cgroup_nid(pc);
1503         zid = page_cgroup_zid(pc);
1504         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
1505         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
1506
1507         if (!trylock_page_cgroup(pc))
1508                 return ret;
1509
1510         if (!PageCgroupUsed(pc))
1511                 goto out;
1512
1513         if (pc->mem_cgroup != from)
1514                 goto out;
1515
1516         if (!mem_cgroup_is_root(from))
1517                 res_counter_uncharge(&from->res, PAGE_SIZE);
1518         mem_cgroup_charge_statistics(from, pc, false);
1519
1520         page = pc->page;
1521         if (page_is_file_cache(page) && page_mapped(page)) {
1522                 cpu = smp_processor_id();
1523                 /* Update mapped_file data for mem_cgroup "from" */
1524                 stat = &from->stat;
1525                 cpustat = &stat->cpustat[cpu];
1526                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
1527                                                 -1);
1528
1529                 /* Update mapped_file data for mem_cgroup "to" */
1530                 stat = &to->stat;
1531                 cpustat = &stat->cpustat[cpu];
1532                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
1533                                                 1);
1534         }
1535
1536         if (do_swap_account && !mem_cgroup_is_root(from))
1537                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
1538         css_put(&from->css);
1539
1540         css_get(&to->css);
1541         pc->mem_cgroup = to;
1542         mem_cgroup_charge_statistics(to, pc, true);
1543         ret = 0;
1544 out:
1545         unlock_page_cgroup(pc);
1546         /*
1547          * We charge against "to", which may not have any tasks, so "to"
1548          * may be under rmdir(). But in the current implementation the only
1549          * caller of this function is force_empty(), and it is guaranteed
1550          * that "to" is never removed. So, we don't check rmdir status here.
1551          */
1552         return ret;
1553 }
1554
1555 /*
1556  * Move the charges of a page to its parent cgroup.
1557  */
1558
1559 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1560                                   struct mem_cgroup *child,
1561                                   gfp_t gfp_mask)
1562 {
1563         struct page *page = pc->page;
1564         struct cgroup *cg = child->css.cgroup;
1565         struct cgroup *pcg = cg->parent;
1566         struct mem_cgroup *parent;
1567         int ret;
1568
1569         /* Is ROOT ? */
1570         if (!pcg)
1571                 return -EINVAL;
1572
1573
1574         parent = mem_cgroup_from_cont(pcg);
1575
1576
1577         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
1578         if (ret || !parent)
1579                 return ret;
1580
1581         if (!get_page_unless_zero(page)) {
1582                 ret = -EBUSY;
1583                 goto uncharge;
1584         }
1585
1586         ret = isolate_lru_page(page);
1587
1588         if (ret)
1589                 goto cancel;
1590
1591         ret = mem_cgroup_move_account(pc, child, parent);
1592
1593         putback_lru_page(page);
1594         if (!ret) {
1595                 put_page(page);
1596                 /* drop extra refcnt by try_charge() */
1597                 css_put(&parent->css);
1598                 return 0;
1599         }
1600
1601 cancel:
1602         put_page(page);
1603 uncharge:
1604         /* drop extra refcnt by try_charge() */
1605         css_put(&parent->css);
1606         /* uncharge if move fails */
1607         if (!mem_cgroup_is_root(parent)) {
1608                 res_counter_uncharge(&parent->res, PAGE_SIZE);
1609                 if (do_swap_account)
1610                         res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1611         }
1612         return ret;
1613 }
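
/*
 * In short: mem_cgroup_move_parent() charges the parent speculatively,
 * pins and isolates the page, and only then lets
 * mem_cgroup_move_account() flip pc->mem_cgroup; the speculative
 * parent charge is rolled back only if the move fails.
 */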
1614
1615 /*
1616  * Charge the memory controller for page usage.
1617  * Returns
1618  * 0 if the charge was successful,
1619  * < 0 if the cgroup is over its limit.
1620  */
1621 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1622                                 gfp_t gfp_mask, enum charge_type ctype,
1623                                 struct mem_cgroup *memcg)
1624 {
1625         struct mem_cgroup *mem;
1626         struct page_cgroup *pc;
1627         int ret;
1628
1629         pc = lookup_page_cgroup(page);
1630         /* can happen at boot */
1631         if (unlikely(!pc))
1632                 return 0;
1633         prefetchw(pc);
1634
1635         mem = memcg;
1636         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
1637         if (ret || !mem)
1638                 return ret;
1639
1640         __mem_cgroup_commit_charge(mem, pc, ctype);
1641         return 0;
1642 }
1643
1644 int mem_cgroup_newpage_charge(struct page *page,
1645                               struct mm_struct *mm, gfp_t gfp_mask)
1646 {
1647         if (mem_cgroup_disabled())
1648                 return 0;
1649         if (PageCompound(page))
1650                 return 0;
1651         /*
1652          * If already mapped, we don't have to account.
1653          * If it is page cache, page->mapping points to an address_space.
1654          * But page->mapping may hold a stale anon_vma pointer; detect
1655          * that with the PageAnon() check. A newly-mapped-anon page's
1656          * page->mapping is NULL.
1657          */
1658         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1659                 return 0;
1660         if (unlikely(!mm))
1661                 mm = &init_mm;
1662         return mem_cgroup_charge_common(page, mm, gfp_mask,
1663                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1664 }
1665
1666 static void
1667 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1668                                         enum charge_type ctype);
1669
1670 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1671                                 gfp_t gfp_mask)
1672 {
1673         struct mem_cgroup *mem = NULL;
1674         int ret;
1675
1676         if (mem_cgroup_disabled())
1677                 return 0;
1678         if (PageCompound(page))
1679                 return 0;
1680         /*
1681          * Corner case handling. This is usually called from
1682          * add_to_page_cache(). But some filesystems (shmem) precharge
1683          * the page before calling it and then call add_to_page_cache()
1684          * with GFP_NOWAIT.
1685          *
1686          * In the GFP_NOWAIT case, the page may already be charged before
1687          * add_to_page_cache() runs (see shmem.c); check for that here and
1688          * avoid charging twice. (It works, but costs a bit more.) When the
1689          * page is SwapCache, the swap information should also be taken
1690          * into account. This runs under lock_page() now. A sketch follows below.
1691         if (!(gfp_mask & __GFP_WAIT)) {
1692                 struct page_cgroup *pc;
1693
1694
1695                 pc = lookup_page_cgroup(page);
1696                 if (!pc)
1697                         return 0;
1698                 lock_page_cgroup(pc);
1699                 if (PageCgroupUsed(pc)) {
1700                         unlock_page_cgroup(pc);
1701                         return 0;
1702                 }
1703                 unlock_page_cgroup(pc);
1704         }
1705
1706         if (unlikely(!mm && !mem))
1707                 mm = &init_mm;
1708
1709         if (page_is_file_cache(page))
1710                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1711                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1712
1713         /* shmem */
1714         if (PageSwapCache(page)) {
1715                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
1716                 if (!ret)
1717                         __mem_cgroup_commit_charge_swapin(page, mem,
1718                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
1719         } else
1720                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
1721                                         MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1722
1723         return ret;
1724 }
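
/*
 * A rough sketch of the shmem corner case described above (not
 * verbatim shmem code; the call sequence is the point, not the exact
 * arguments):
 *
 *	mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);  // precharge
 *	...
 *	add_to_page_cache_lru(page, mapping, idx, GFP_NOWAIT);
 *		// re-enters mem_cgroup_cache_charge() with GFP_NOWAIT;
 *		// the PageCgroupUsed() test above then returns 0, so the
 *		// page is not charged twice.
 */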
1725
1726 /*
1727  * During swap-in (try_charge -> commit or cancel), the page is locked.
1728  * When try_charge() returns successfully, one reference to the memcg, not
1729  * tied to a struct page_cgroup, is acquired; it is consumed by "commit()"
1730  * or dropped by "cancel()". A caller sketch follows these three functions.
1731  */
1732 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1733                                  struct page *page,
1734                                  gfp_t mask, struct mem_cgroup **ptr)
1735 {
1736         struct mem_cgroup *mem;
1737         int ret;
1738
1739         if (mem_cgroup_disabled())
1740                 return 0;
1741
1742         if (!do_swap_account)
1743                 goto charge_cur_mm;
1744         /*
1745          * A racing thread's fault, or swapoff, may have already updated
1746          * the pte, and even removed the page from the swap cache: return
1747          * success so do_swap_page()'s pte_same() test runs, which should fail.
1748          */
1749         if (!PageSwapCache(page))
1750                 return 0;
1751         mem = try_get_mem_cgroup_from_swapcache(page);
1752         if (!mem)
1753                 goto charge_cur_mm;
1754         *ptr = mem;
1755         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
1756         /* drop extra refcnt from tryget */
1757         css_put(&mem->css);
1758         return ret;
1759 charge_cur_mm:
1760         if (unlikely(!mm))
1761                 mm = &init_mm;
1762         return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
1763 }
1764
1765 static void
1766 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1767                                         enum charge_type ctype)
1768 {
1769         struct page_cgroup *pc;
1770
1771         if (mem_cgroup_disabled())
1772                 return;
1773         if (!ptr)
1774                 return;
1775         cgroup_exclude_rmdir(&ptr->css);
1776         pc = lookup_page_cgroup(page);
1777         mem_cgroup_lru_del_before_commit_swapcache(page);
1778         __mem_cgroup_commit_charge(ptr, pc, ctype);
1779         mem_cgroup_lru_add_after_commit_swapcache(page);
1780         /*
1781          * Now the swap is in memory, so this page may be counted both as
1782          * mem and swap -- a double count. Fix it by uncharging from memsw.
1783          * Basically, this SwapCache is stable under lock_page(), but
1784          * reuse_swap_page() (see do_swap_page() in memory.c) may call
1785          * delete_from_swap_cache() before we reach here.
1786          */
1787         if (do_swap_account && PageSwapCache(page)) {
1788                 swp_entry_t ent = {.val = page_private(page)};
1789                 unsigned short id;
1790                 struct mem_cgroup *memcg;
1791
1792                 id = swap_cgroup_record(ent, 0);
1793                 rcu_read_lock();
1794                 memcg = mem_cgroup_lookup(id);
1795                 if (memcg) {
1796                         /*
1797                          * The recorded memcg may be an obsolete one,
1798                          * so avoid calling css_tryget().
1799                          */
1800                         if (!mem_cgroup_is_root(memcg))
1801                                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1802                         mem_cgroup_swap_statistics(memcg, false);
1803                         mem_cgroup_put(memcg);
1804                 }
1805                 rcu_read_unlock();
1806         }
1807         /*
1808          * At swapin, we may charge a cgroup which has no tasks, so
1809          * rmdir()->pre_destroy() can be called while we do this charge.
1810          * In that case, we need to call pre_destroy() again; check it here.
1811          */
1812         cgroup_release_and_wakeup_rmdir(&ptr->css);
1813 }
1814
1815 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1816 {
1817         __mem_cgroup_commit_charge_swapin(page, ptr,
1818                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
1819 }
1820
1821 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1822 {
1823         if (mem_cgroup_disabled())
1824                 return;
1825         if (!mem)
1826                 return;
1827         if (!mem_cgroup_is_root(mem)) {
1828                 res_counter_uncharge(&mem->res, PAGE_SIZE);
1829                 if (do_swap_account)
1830                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1831         }
1832         css_put(&mem->css);
1833 }
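
/*
 * A condensed sketch of the try/commit/cancel protocol implemented by
 * the three functions above, modeled on the do_swap_page() caller
 * (error handling trimmed; illustrative only):
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out;				// charge refused
 *	if (likely(pte_same(*page_table, orig_pte)))
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);	// lost the race
 */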
1834
1835
1836 /*
1837  * uncharge if !page_mapped(page)
1838  */
1839 static struct mem_cgroup *
1840 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1841 {
1842         struct page_cgroup *pc;
1843         struct mem_cgroup *mem = NULL;
1844         struct mem_cgroup_per_zone *mz;
1845
1846         if (mem_cgroup_disabled())
1847                 return NULL;
1848
1849         if (PageSwapCache(page))
1850                 return NULL;
1851
1852         /*
1853          * Check if our page_cgroup is valid
1854          */
1855         pc = lookup_page_cgroup(page);
1856         if (unlikely(!pc || !PageCgroupUsed(pc)))
1857                 return NULL;
1858
1859         lock_page_cgroup(pc);
1860
1861         mem = pc->mem_cgroup;
1862
1863         if (!PageCgroupUsed(pc))
1864                 goto unlock_out;
1865
1866         switch (ctype) {
1867         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1868         case MEM_CGROUP_CHARGE_TYPE_DROP:
1869                 if (page_mapped(page))
1870                         goto unlock_out;
1871                 break;
1872         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1873                 if (!PageAnon(page)) {  /* Shared memory */
1874                         if (page->mapping && !page_is_file_cache(page))
1875                                 goto unlock_out;
1876                 } else if (page_mapped(page)) /* Anon */
1877                                 goto unlock_out;
1878                 break;
1879         default:
1880                 break;
1881         }
1882
1883         if (!mem_cgroup_is_root(mem)) {
1884                 res_counter_uncharge(&mem->res, PAGE_SIZE);
1885                 if (do_swap_account &&
1886                                 (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1887                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1888         }
1889         if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1890                 mem_cgroup_swap_statistics(mem, true);
1891         mem_cgroup_charge_statistics(mem, pc, false);
1892
1893         ClearPageCgroupUsed(pc);
1894         /*
1895          * pc->mem_cgroup is not cleared here; it is still accessed when the
1896          * page is freed from the LRU. This is safe because an uncharged page
1897          * is expected not to be reused (it is freed soon). The exception is
1898          * SwapCache, which is handled by special functions.
1899          */
1900
1901         mz = page_cgroup_zoneinfo(pc);
1902         unlock_page_cgroup(pc);
1903
1904         if (mem_cgroup_soft_limit_check(mem))
1905                 mem_cgroup_update_tree(mem, page);
1906         /* at swapout, this memcg will be accessed to record to swap */
1907         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1908                 css_put(&mem->css);
1909
1910         return mem;
1911
1912 unlock_out:
1913         unlock_page_cgroup(pc);
1914         return NULL;
1915 }
1916
1917 void mem_cgroup_uncharge_page(struct page *page)
1918 {
1919         /* early check. */
1920         if (page_mapped(page))
1921                 return;
1922         if (page->mapping && !PageAnon(page))
1923                 return;
1924         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1925 }
1926
1927 void mem_cgroup_uncharge_cache_page(struct page *page)
1928 {
1929         VM_BUG_ON(page_mapped(page));
1930         VM_BUG_ON(page->mapping);
1931         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1932 }
1933
1934 #ifdef CONFIG_SWAP
1935 /*
1936  * Called after __delete_from_swap_cache(); drops the "page" account.
1937  * The memcg information is recorded in the swap_cgroup of "ent".
1938  */
1939 void
1940 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
1941 {
1942         struct mem_cgroup *memcg;
1943         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
1944
1945         if (!swapout) /* this was a swap cache but the swap is unused! */
1946                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
1947
1948         memcg = __mem_cgroup_uncharge_common(page, ctype);
1949
1950         /* record memcg information */
1951         if (do_swap_account && swapout && memcg) {
1952                 swap_cgroup_record(ent, css_id(&memcg->css));
1953                 mem_cgroup_get(memcg);
1954         }
1955         if (swapout && memcg)
1956                 css_put(&memcg->css);
1957 }
1958 #endif
1959
1960 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1961 /*
1962  * Called from swap_entry_free(). Removes the record in swap_cgroup and
1963  * uncharges the "memsw" account.
1964  */
1965 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1966 {
1967         struct mem_cgroup *memcg;
1968         unsigned short id;
1969
1970         if (!do_swap_account)
1971                 return;
1972
1973         id = swap_cgroup_record(ent, 0);
1974         rcu_read_lock();
1975         memcg = mem_cgroup_lookup(id);
1976         if (memcg) {
1977                 /*
1978                  * We uncharge this because the swap entry is freed. This
1979                  * memcg may be an obsolete one, so avoid calling css_tryget().
1980                  */
1981                 if (!mem_cgroup_is_root(memcg))
1982                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1983                 mem_cgroup_swap_statistics(memcg, false);
1984                 mem_cgroup_put(memcg);
1985         }
1986         rcu_read_unlock();
1987 }
1988 #endif
1989
1990 /*
1991  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
1992  * old page belongs to.
1993  */
1994 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1995 {
1996         struct page_cgroup *pc;
1997         struct mem_cgroup *mem = NULL;
1998         int ret = 0;
1999
2000         if (mem_cgroup_disabled())
2001                 return 0;
2002
2003         pc = lookup_page_cgroup(page);
2004         lock_page_cgroup(pc);
2005         if (PageCgroupUsed(pc)) {
2006                 mem = pc->mem_cgroup;
2007                 css_get(&mem->css);
2008         }
2009         unlock_page_cgroup(pc);
2010
2011         *ptr = mem;
2012         if (mem) {
2013                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false,
2014                                                 page);
2015                 css_put(&mem->css);
2016         }
2017         return ret;
2018 }
2019
2020 /* remove the redundant charge if migration failed */
2021 void mem_cgroup_end_migration(struct mem_cgroup *mem,
2022                 struct page *oldpage, struct page *newpage)
2023 {
2024         struct page *target, *unused;
2025         struct page_cgroup *pc;
2026         enum charge_type ctype;
2027
2028         if (!mem)
2029                 return;
2030         cgroup_exclude_rmdir(&mem->css);
2031         /* on migration success, oldpage->mapping is NULL. */
2032         if (oldpage->mapping) {
2033                 target = oldpage;
2034                 unused = NULL;
2035         } else {
2036                 target = newpage;
2037                 unused = oldpage;
2038         }
2039
2040         if (PageAnon(target))
2041                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2042         else if (page_is_file_cache(target))
2043                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2044         else
2045                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2046
2047         /* the unused page is no longer in the radix tree. */
2048         if (unused)
2049                 __mem_cgroup_uncharge_common(unused, ctype);
2050
2051         pc = lookup_page_cgroup(target);
2052         /*
2053          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the
2054          * page_cgroup, so double-counting is effectively avoided.
2055          */
2056         __mem_cgroup_commit_charge(mem, pc, ctype);
2057
2058         /*
2059          * Both oldpage and newpage are still under lock_page(), so we
2060          * don't have to worry about races in the radix tree. But we do
2061          * have to be careful about whether this page is mapped or not.
2062          *
2063          * There is a case for !page_mapped(): at the start of migration
2064          * oldpage was mapped, but by now it has been zapped. We know the
2065          * *target* page is not freed/reused under us, and
2066          * mem_cgroup_uncharge_page() does all the necessary checks.
2067          */
2068         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
2069                 mem_cgroup_uncharge_page(target);
2070         /*
2071          * At migration, we may charge a cgroup which has no tasks, so
2072          * rmdir()->pre_destroy() can be called while we do this charge.
2073          * In that case, we need to call pre_destroy() again; check it here.
2074          */
2075         cgroup_release_and_wakeup_rmdir(&mem->css);
2076 }
2077
2078 /*
2079  * A call to try to shrink memory usage on charge failure at shmem's swap-in.
2080  * Calling hierarchical_reclaim is not enough because we should update
2081  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
2082  * Moreover, considering the hierarchy, we should reclaim from the
2083  * mem_over_limit, not from the memcg this page would be charged to.
2084  * try_charge_swapin does all of this work properly.
2085  */
2086 int mem_cgroup_shmem_charge_fallback(struct page *page,
2087                             struct mm_struct *mm,
2088                             gfp_t gfp_mask)
2089 {
2090         struct mem_cgroup *mem = NULL;
2091         int ret;
2092
2093         if (mem_cgroup_disabled())
2094                 return 0;
2095
2096         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2097         if (!ret)
2098                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
2099
2100         return ret;
2101 }
2102
2103 static DEFINE_MUTEX(set_limit_mutex);
2104
2105 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2106                                 unsigned long long val)
2107 {
2108         int retry_count;
2109         int progress;
2110         u64 memswlimit;
2111         int ret = 0;
2112         int children = mem_cgroup_count_children(memcg);
2113         u64 curusage, oldusage;
2114
2115         /*
2116          * To keep hierarchical_reclaim simple, how long we should retry
2117          * depends on the caller. We set our retry count to be a function
2118          * of the number of children we should visit in this loop.
2119          */
2120         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
2121
2122         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2123
2124         while (retry_count) {
2125                 if (signal_pending(current)) {
2126                         ret = -EINTR;
2127                         break;
2128                 }
2129                 /*
2130                  * Rather than hiding this in some function, do it open-coded
2131                  * so it is clear what really happens. We have to guarantee
2132                  * mem->res.limit <= mem->memsw.limit.
2133                  */
2134                 mutex_lock(&set_limit_mutex);
2135                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2136                 if (memswlimit < val) {
2137                         ret = -EINVAL;
2138                         mutex_unlock(&set_limit_mutex);
2139                         break;
2140                 }
2141                 ret = res_counter_set_limit(&memcg->res, val);
2142                 if (!ret) {
2143                         if (memswlimit == val)
2144                                 memcg->memsw_is_minimum = true;
2145                         else
2146                                 memcg->memsw_is_minimum = false;
2147                 }
2148                 mutex_unlock(&set_limit_mutex);
2149
2150                 if (!ret)
2151                         break;
2152
2153                 progress = mem_cgroup_hierarchical_reclaim(memcg, NULL,
2154                                                 GFP_KERNEL,
2155                                                 MEM_CGROUP_RECLAIM_SHRINK);
2156                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2157                 /* Was usage reduced? */
2158                 if (curusage >= oldusage)
2159                         retry_count--;
2160                 else
2161                         oldusage = curusage;
2162         }
2163
2164         return ret;
2165 }
2166
2167 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2168                                         unsigned long long val)
2169 {
2170         int retry_count;
2171         u64 memlimit, oldusage, curusage;
2172         int children = mem_cgroup_count_children(memcg);
2173         int ret = -EBUSY;
2174
2175         /* see mem_cgroup_resize_limit() */
2176         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
2177         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2178         while (retry_count) {
2179                 if (signal_pending(current)) {
2180                         ret = -EINTR;
2181                         break;
2182                 }
2183                 /*
2184                  * Rather than hiding this in some function, do it open-coded
2185                  * so it is clear what really happens. We have to guarantee
2186                  * mem->res.limit <= mem->memsw.limit.
2187                  */
2188                 mutex_lock(&set_limit_mutex);
2189                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2190                 if (memlimit > val) {
2191                         ret = -EINVAL;
2192                         mutex_unlock(&set_limit_mutex);
2193                         break;
2194                 }
2195                 ret = res_counter_set_limit(&memcg->memsw, val);
2196                 if (!ret) {
2197                         if (memlimit == val)
2198                                 memcg->memsw_is_minimum = true;
2199                         else
2200                                 memcg->memsw_is_minimum = false;
2201                 }
2202                 mutex_unlock(&set_limit_mutex);
2203
2204                 if (!ret)
2205                         break;
2206
2207                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2208                                                 MEM_CGROUP_RECLAIM_NOSWAP |
2209                                                 MEM_CGROUP_RECLAIM_SHRINK);
2210                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2211                 /* Was usage reduced? */
2212                 if (curusage >= oldusage)
2213                         retry_count--;
2214                 else
2215                         oldusage = curusage;
2216         }
2217         return ret;
2218 }
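
/*
 * Both resize paths above preserve a single invariant, sketched here as
 * a hypothetical helper (illustrative name, not a real kernel function):
 *
 *	static inline bool limits_consistent(u64 mem_limit, u64 memsw_limit)
 *	{
 *		return mem_limit <= memsw_limit;
 *	}
 *
 * mem_cgroup_resize_limit() rejects a memory limit above the memsw
 * limit; mem_cgroup_resize_memsw_limit() rejects a memsw limit below
 * the memory limit. set_limit_mutex serializes the two checks.
 */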
2219
2220 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2221                                                 gfp_t gfp_mask, int nid,
2222                                                 int zid)
2223 {
2224         unsigned long nr_reclaimed = 0;
2225         struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2226         unsigned long reclaimed;
2227         int loop = 0;
2228         struct mem_cgroup_tree_per_zone *mctz;
2229         unsigned long long excess;
2230
2231         if (order > 0)
2232                 return 0;
2233
2234         mctz = soft_limit_tree_node_zone(nid, zid);
2235         /*
2236          * This loop can run for a while, especially if mem_cgroups
2237          * continuously keep exceeding their soft limit and putting the
2238          * system under pressure.
2239          */
2240         do {
2241                 if (next_mz)
2242                         mz = next_mz;
2243                 else
2244                         mz = mem_cgroup_largest_soft_limit_node(mctz);
2245                 if (!mz)
2246                         break;
2247
2248                 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
2249                                                 gfp_mask,
2250                                                 MEM_CGROUP_RECLAIM_SOFT);
2251                 nr_reclaimed += reclaimed;
2252                 spin_lock(&mctz->lock);
2253
2254                 /*
2255                  * If we failed to reclaim anything from this memory cgroup
2256                  * it is time to move on to the next cgroup
2257                  */
2258                 next_mz = NULL;
2259                 if (!reclaimed) {
2260                         do {
2261                                 /*
2262                                  * Loop until we find yet another one.
2263                                  *
2264                                  * By the time we get the soft_limit lock
2265                                  * again, someone might have added the
2266                                  * group back on the RB tree. Iterate to
2267                                  * make sure we get a different mem.
2268                                  * mem_cgroup_largest_soft_limit_node()
2269                                  * returns NULL if no other cgroup is
2270                                  * present on the tree.
2271                                  */
2272                                 next_mz =
2273                                 __mem_cgroup_largest_soft_limit_node(mctz);
2274                                 if (next_mz == mz) {
2275                                         css_put(&next_mz->mem->css);
2276                                         next_mz = NULL;
2277                                 } else /* next_mz == NULL or other memcg */
2278                                         break;
2279                         } while (1);
2280                 }
2281                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
2282                 excess = res_counter_soft_limit_excess(&mz->mem->res);
2283                 /*
2284                  * One school of thought says that we should not add
2285                  * back the node to the tree if reclaim returns 0.
2286                  * But our reclaim could return 0 simply because, due
2287                  * to priority, we are exposing a smaller subset of
2288                  * memory to reclaim from. Consider this a longer-
2289                  * term TODO.
2290                  */
2291                 /* If excess == 0, no tree ops */
2292                 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
2293                 spin_unlock(&mctz->lock);
2294                 css_put(&mz->mem->css);
2295                 loop++;
2296                 /*
2297                  * Could not reclaim anything and there are no more
2298                  * mem cgroups to try or we seem to be looping without
2299                  * reclaiming anything.
2300                  */
2301                 if (!nr_reclaimed &&
2302                         (next_mz == NULL ||
2303                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2304                         break;
2305         } while (!nr_reclaimed);
2306         if (next_mz)
2307                 css_put(&next_mz->mem->css);
2308         return nr_reclaimed;
2309 }
2310
2311 /*
2312  * This routine traverses the page_cgroups in the given list and drops them
2313  * all. Note that it doesn't reclaim the pages themselves, just the page_cgroups.
2314  */
2315 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
2316                                 int node, int zid, enum lru_list lru)
2317 {
2318         struct zone *zone;
2319         struct mem_cgroup_per_zone *mz;
2320         struct page_cgroup *pc, *busy;
2321         unsigned long flags, loop;
2322         struct list_head *list;
2323         int ret = 0;
2324
2325         zone = &NODE_DATA(node)->node_zones[zid];
2326         mz = mem_cgroup_zoneinfo(mem, node, zid);
2327         list = &mz->lists[lru];
2328
2329         loop = MEM_CGROUP_ZSTAT(mz, lru);
2330         /* give some margin against -EBUSY etc. */
2331         loop += 256;
2332         busy = NULL;
2333         while (loop--) {
2334                 ret = 0;
2335                 spin_lock_irqsave(&zone->lru_lock, flags);
2336                 if (list_empty(list)) {
2337                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2338                         break;
2339                 }
2340                 pc = list_entry(list->prev, struct page_cgroup, lru);
2341                 if (busy == pc) {
2342                         list_move(&pc->lru, list);
2343                         busy = NULL;
2344                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2345                         continue;
2346                 }
2347                 spin_unlock_irqrestore(&zone->lru_lock, flags);
2348
2349                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
2350                 if (ret == -ENOMEM)
2351                         break;
2352
2353                 if (ret == -EBUSY || ret == -EINVAL) {
2354                         /* found lock contention or "pc" is obsolete. */
2355                         busy = pc;
2356                         cond_resched();
2357                 } else
2358                         busy = NULL;
2359         }
2360
2361         if (!ret && !list_empty(list))
2362                 return -EBUSY;
2363         return ret;
2364 }
2365
2366 /*
2367  * Make the mem_cgroup's charge 0 if there is no task in it.
2368  * This enables the mem_cgroup to be deleted.
2369  */
2370 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
2371 {
2372         int ret;
2373         int node, zid, shrink;
2374         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2375         struct cgroup *cgrp = mem->css.cgroup;
2376
2377         css_get(&mem->css);
2378
2379         shrink = 0;
2380         /* should we free everything? */
2381         if (free_all)
2382                 goto try_to_free;
2383 move_account:
2384         do {
2385                 ret = -EBUSY;
2386                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
2387                         goto out;
2388                 ret = -EINTR;
2389                 if (signal_pending(current))
2390                         goto out;
2391                 /* This makes sure all *used* pages are on an LRU. */
2392                 lru_add_drain_all();
2393                 ret = 0;
2394                 for_each_node_state(node, N_HIGH_MEMORY) {
2395                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
2396                                 enum lru_list l;
2397                                 for_each_lru(l) {
2398                                         ret = mem_cgroup_force_empty_list(mem,
2399                                                         node, zid, l);
2400                                         if (ret)
2401                                                 break;
2402                                 }
2403                         }
2404                         if (ret)
2405                                 break;
2406                 }
2407                 /* it seems the parent cgroup doesn't have enough memory */
2408                 if (ret == -ENOMEM)
2409                         goto try_to_free;
2410                 cond_resched();
2411         /* "ret" should also be checked to ensure all lists are empty. */
2412         } while (mem->res.usage > 0 || ret);
2413 out:
2414         css_put(&mem->css);
2415         return ret;
2416
2417 try_to_free:
2418         /* returns -EBUSY if there is a task or if we come here twice. */
2419         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
2420                 ret = -EBUSY;
2421                 goto out;
2422         }
2423         /* we call try-to-free pages to make this cgroup empty */
2424         lru_add_drain_all();
2425         /* try to free all pages in this cgroup */
2426         shrink = 1;
2427         while (nr_retries && mem->res.usage > 0) {
2428                 int progress;
2429
2430                 if (signal_pending(current)) {
2431                         ret = -EINTR;
2432                         goto out;
2433                 }
2434                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
2435                                                 false, get_swappiness(mem));
2436                 if (!progress) {
2437                         nr_retries--;
2438                         /* maybe some writeback is necessary */
2439                         congestion_wait(BLK_RW_ASYNC, HZ/10);
2440                 }
2441
2442         }
2443         lru_add_drain();
2444         /* try move_account...there may be some *locked* pages. */
2445         goto move_account;
2446 }
2447
2448 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
2449 {
2450         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
2451 }
2452
2453
2454 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
2455 {
2456         return mem_cgroup_from_cont(cont)->use_hierarchy;
2457 }
2458
2459 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
2460                                         u64 val)
2461 {
2462         int retval = 0;
2463         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2464         struct cgroup *parent = cont->parent;
2465         struct mem_cgroup *parent_mem = NULL;
2466
2467         if (parent)
2468                 parent_mem = mem_cgroup_from_cont(parent);
2469
2470         cgroup_lock();
2471         /*
2472          * If the parent's use_hierarchy is set, we can't make any
2473          * modifications in the child subtrees. If it is unset, then the
2474          * change can occur, provided the current cgroup has no children.
2475          *
2476          * For the root cgroup, parent_mem is NULL; we allow the value to
2477          * be set if there are no children.
2478          */
2479         if ((!parent_mem || !parent_mem->use_hierarchy) &&
2480                                 (val == 1 || val == 0)) {
2481                 if (list_empty(&cont->children))
2482                         mem->use_hierarchy = val;
2483                 else
2484                         retval = -EBUSY;
2485         } else
2486                 retval = -EINVAL;
2487         cgroup_unlock();
2488
2489         return retval;
2490 }
2491
2492 struct mem_cgroup_idx_data {
2493         s64 val;
2494         enum mem_cgroup_stat_index idx;
2495 };
2496
2497 static int
2498 mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
2499 {
2500         struct mem_cgroup_idx_data *d = data;
2501         d->val += mem_cgroup_read_stat(&mem->stat, d->idx);
2502         return 0;
2503 }
2504
2505 static void
2506 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
2507                                 enum mem_cgroup_stat_index idx, s64 *val)
2508 {
2509         struct mem_cgroup_idx_data d;
2510         d.idx = idx;
2511         d.val = 0;
2512         mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
2513         *val = d.val;
2514 }
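
/*
 * mem_cgroup_walk_tree() applies the callback to "mem" and, under
 * use_hierarchy, to each descendant; the callbacks in this file return
 * 0 to keep walking. A minimal sketch of another walker in the same
 * style (hypothetical callback, illustrative only):
 *
 *	static int count_one(struct mem_cgroup *mem, void *data)
 *	{
 *		(*(int *)data)++;	// visit: bump the counter
 *		return 0;		// keep walking
 *	}
 *
 *	int n = 0;
 *	mem_cgroup_walk_tree(mem, &n, count_one);
 */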
2515
2516 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
2517 {
2518         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2519         u64 idx_val, val;
2520         int type, name;
2521
2522         type = MEMFILE_TYPE(cft->private);
2523         name = MEMFILE_ATTR(cft->private);
2524         switch (type) {
2525         case _MEM:
2526                 if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
2527                         mem_cgroup_get_recursive_idx_stat(mem,
2528                                 MEM_CGROUP_STAT_CACHE, &idx_val);
2529                         val = idx_val;
2530                         mem_cgroup_get_recursive_idx_stat(mem,
2531                                 MEM_CGROUP_STAT_RSS, &idx_val);
2532                         val += idx_val;
2533                         val <<= PAGE_SHIFT;
2534                 } else
2535                         val = res_counter_read_u64(&mem->res, name);
2536                 break;
2537         case _MEMSWAP:
2538                 if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
2539                         mem_cgroup_get_recursive_idx_stat(mem,
2540                                 MEM_CGROUP_STAT_CACHE, &idx_val);
2541                         val = idx_val;
2542                         mem_cgroup_get_recursive_idx_stat(mem,
2543                                 MEM_CGROUP_STAT_RSS, &idx_val);
2544                         val += idx_val;
2545                         mem_cgroup_get_recursive_idx_stat(mem,
2546                                 MEM_CGROUP_STAT_SWAPOUT, &idx_val);
2547                         val += idx_val;
2548                         val <<= PAGE_SHIFT;
2549                 } else
2550                         val = res_counter_read_u64(&mem->memsw, name);
2551                 break;
2552         default:
2553                 BUG();
2554                 break;
2555         }
2556         return val;
2557 }
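
/*
 * Note on cft->private: MEMFILE_PRIVATE() packs a counter type (_MEM or
 * _MEMSWAP) together with a res_counter member (RES_USAGE, RES_LIMIT,
 * ...) into one integer, and MEMFILE_TYPE()/MEMFILE_ATTR() unpack it
 * here. A sketch of the usual encoding (see the macro definitions
 * earlier in this file for the authoritative version):
 *
 *	private = (type << 16) | attr;
 *	type    = (private >> 16) & 0xffff;
 *	attr    =  private        & 0xffff;
 */
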
2558 /*
2559  * The users of this function are the RES_LIMIT and
2560  * RES_SOFT_LIMIT write handlers.
2561  */
2562 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
2563                             const char *buffer)
2564 {
2565         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
2566         int type, name;
2567         unsigned long long val;
2568         int ret;
2569
2570         type = MEMFILE_TYPE(cft->private);
2571         name = MEMFILE_ATTR(cft->private);
2572         switch (name) {
2573         case RES_LIMIT:
2574                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2575                         ret = -EINVAL;
2576                         break;
2577                 }
2578                 /* This function does all the necessary parsing; reuse it */
2579                 ret = res_counter_memparse_write_strategy(buffer, &val);
2580                 if (ret)
2581                         break;
2582                 if (type == _MEM)
2583                         ret = mem_cgroup_resize_limit(memcg, val);
2584                 else
2585                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
2586                 break;
2587         case RES_SOFT_LIMIT:
2588                 ret = res_counter_memparse_write_strategy(buffer, &val);
2589                 if (ret)
2590                         break;
2591                 /*
2592                  * For memsw, soft limits are hard to implement in terms
2593                  * of semantics; for now, we only support soft limits for
2594                  * memory control without swap.
2595                  */
2596                 if (type == _MEM)
2597                         ret = res_counter_set_soft_limit(&memcg->res, val);
2598                 else
2599                         ret = -EINVAL;
2600                 break;
2601         default:
2602                 ret = -EINVAL; /* should be BUG() ? */
2603                 break;
2604         }
2605         return ret;
2606 }
2607
2608 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
2609                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
2610 {
2611         struct cgroup *cgroup;
2612         unsigned long long min_limit, min_memsw_limit, tmp;
2613
2614         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2615         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2616         cgroup = memcg->css.cgroup;
2617         if (!memcg->use_hierarchy)
2618                 goto out;
2619
2620         while (cgroup->parent) {
2621                 cgroup = cgroup->parent;
2622                 memcg = mem_cgroup_from_cont(cgroup);
2623                 if (!memcg->use_hierarchy)
2624                         break;
2625                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
2626                 min_limit = min(min_limit, tmp);
2627                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2628                 min_memsw_limit = min(min_memsw_limit, tmp);
2629         }
2630 out:
2631         *mem_limit = min_limit;
2632         *memsw_limit = min_memsw_limit;
2633         return;
2634 }
2635
2636 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
2637 {
2638         struct mem_cgroup *mem;
2639         int type, name;
2640
2641         mem = mem_cgroup_from_cont(cont);
2642         type = MEMFILE_TYPE(event);
2643         name = MEMFILE_ATTR(event);
2644         switch (name) {
2645         case RES_MAX_USAGE:
2646                 if (type == _MEM)
2647                         res_counter_reset_max(&mem->res);
2648                 else
2649                         res_counter_reset_max(&mem->memsw);
2650                 break;
2651         case RES_FAILCNT:
2652                 if (type == _MEM)
2653                         res_counter_reset_failcnt(&mem->res);
2654                 else
2655                         res_counter_reset_failcnt(&mem->memsw);
2656                 break;
2657         }
2658
2659         return 0;
2660 }
2661
2662
2663 /* For reading statistics */
2664 enum {
2665         MCS_CACHE,
2666         MCS_RSS,
2667         MCS_MAPPED_FILE,
2668         MCS_PGPGIN,
2669         MCS_PGPGOUT,
2670         MCS_SWAP,
2671         MCS_INACTIVE_ANON,
2672         MCS_ACTIVE_ANON,
2673         MCS_INACTIVE_FILE,
2674         MCS_ACTIVE_FILE,
2675         MCS_UNEVICTABLE,
2676         NR_MCS_STAT,
2677 };
2678
2679 struct mcs_total_stat {
2680         s64 stat[NR_MCS_STAT];
2681 };
2682
2683 struct {
2684         char *local_name;
2685         char *total_name;
2686 } memcg_stat_strings[NR_MCS_STAT] = {
2687         {"cache", "total_cache"},
2688         {"rss", "total_rss"},
2689         {"mapped_file", "total_mapped_file"},
2690         {"pgpgin", "total_pgpgin"},
2691         {"pgpgout", "total_pgpgout"},
2692         {"swap", "total_swap"},
2693         {"inactive_anon", "total_inactive_anon"},
2694         {"active_anon", "total_active_anon"},
2695         {"inactive_file", "total_inactive_file"},
2696         {"active_file", "total_active_file"},
2697         {"unevictable", "total_unevictable"}
2698 };
2699
2700
2701 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
2702 {
2703         struct mcs_total_stat *s = data;
2704         s64 val;
2705
2706         /* per cpu stat */
2707         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
2708         s->stat[MCS_CACHE] += val * PAGE_SIZE;
2709         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
2710         s->stat[MCS_RSS] += val * PAGE_SIZE;
2711         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
2712         s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
2713         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
2714         s->stat[MCS_PGPGIN] += val;
2715         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
2716         s->stat[MCS_PGPGOUT] += val;
2717         if (do_swap_account) {
2718                 val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT);
2719                 s->stat[MCS_SWAP] += val * PAGE_SIZE;
2720         }
2721
2722         /* per zone stat */
2723         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
2724         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
2725         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
2726         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
2727         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
2728         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
2729         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
2730         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
2731         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
2732         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
2733         return 0;
2734 }
2735
2736 static void
2737 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
2738 {
2739         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
2740 }
2741
2742 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
2743                                  struct cgroup_map_cb *cb)
2744 {
2745         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
2746         struct mcs_total_stat mystat;
2747         int i;
2748
2749         memset(&mystat, 0, sizeof(mystat));
2750         mem_cgroup_get_local_stat(mem_cont, &mystat);
2751
2752         for (i = 0; i < NR_MCS_STAT; i++) {
2753                 if (i == MCS_SWAP && !do_swap_account)
2754                         continue;
2755                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
2756         }
2757
2758         /* Hierarchical information */
2759         {
2760                 unsigned long long limit, memsw_limit;
2761                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
2762                 cb->fill(cb, "hierarchical_memory_limit", limit);
2763                 if (do_swap_account)
2764                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
2765         }
2766
2767         memset(&mystat, 0, sizeof(mystat));
2768         mem_cgroup_get_total_stat(mem_cont, &mystat);
2769         for (i = 0; i < NR_MCS_STAT; i++) {
2770                 if (i == MCS_SWAP && !do_swap_account)
2771                         continue;
2772                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
2773         }
2774
2775 #ifdef CONFIG_DEBUG_VM
2776         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
2777
2778         {
2779                 int nid, zid;
2780                 struct mem_cgroup_per_zone *mz;
2781                 unsigned long recent_rotated[2] = {0, 0};
2782                 unsigned long recent_scanned[2] = {0, 0};
2783
2784                 for_each_online_node(nid)
2785                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2786                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
2787
2788                                 recent_rotated[0] +=
2789                                         mz->reclaim_stat.recent_rotated[0];
2790                                 recent_rotated[1] +=
2791                                         mz->reclaim_stat.recent_rotated[1];
2792                                 recent_scanned[0] +=
2793                                         mz->reclaim_stat.recent_scanned[0];
2794                                 recent_scanned[1] +=
2795                                         mz->reclaim_stat.recent_scanned[1];
2796                         }
2797                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
2798                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
2799                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
2800                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
2801         }
2802 #endif
2803
2804         return 0;
2805 }
2806
2807 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
2808 {
2809         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2810
2811         return get_swappiness(memcg);
2812 }
2813
2814 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
2815                                        u64 val)
2816 {
2817         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2818         struct mem_cgroup *parent;
2819
2820         if (val > 100)
2821                 return -EINVAL;
2822
2823         if (cgrp->parent == NULL)
2824                 return -EINVAL;
2825
2826         parent = mem_cgroup_from_cont(cgrp->parent);
2827
2828         cgroup_lock();
2829
2830         /* If under hierarchy, only empty-root can set this value */
2831         if ((parent->use_hierarchy) ||
2832             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
2833                 cgroup_unlock();
2834                 return -EINVAL;
2835         }
2836
2837         spin_lock(&memcg->reclaim_param_lock);
2838         memcg->swappiness = val;
2839         spin_unlock(&memcg->reclaim_param_lock);
2840
2841         cgroup_unlock();
2842
2843         return 0;
2844 }
2845
2846
2847 static struct cftype mem_cgroup_files[] = {
2848         {
2849                 .name = "usage_in_bytes",
2850                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2851                 .read_u64 = mem_cgroup_read,
2852         },
2853         {
2854                 .name = "max_usage_in_bytes",
2855                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
2856                 .trigger = mem_cgroup_reset,
2857                 .read_u64 = mem_cgroup_read,
2858         },
2859         {
2860                 .name = "limit_in_bytes",
2861                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
2862                 .write_string = mem_cgroup_write,
2863                 .read_u64 = mem_cgroup_read,
2864         },
2865         {
2866                 .name = "soft_limit_in_bytes",
2867                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
2868                 .write_string = mem_cgroup_write,
2869                 .read_u64 = mem_cgroup_read,
2870         },
2871         {
2872                 .name = "failcnt",
2873                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
2874                 .trigger = mem_cgroup_reset,
2875                 .read_u64 = mem_cgroup_read,
2876         },
2877         {
2878                 .name = "stat",
2879                 .read_map = mem_control_stat_show,
2880         },
2881         {
2882                 .name = "force_empty",
2883                 .trigger = mem_cgroup_force_empty_write,
2884         },
2885         {
2886                 .name = "use_hierarchy",
2887                 .write_u64 = mem_cgroup_hierarchy_write,
2888                 .read_u64 = mem_cgroup_hierarchy_read,
2889         },
2890         {
2891                 .name = "swappiness",
2892                 .read_u64 = mem_cgroup_swappiness_read,
2893                 .write_u64 = mem_cgroup_swappiness_write,
2894         },
2895 };
2896
2897 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2898 static struct cftype memsw_cgroup_files[] = {
2899         {
2900                 .name = "memsw.usage_in_bytes",
2901                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2902                 .read_u64 = mem_cgroup_read,
2903         },
2904         {
2905                 .name = "memsw.max_usage_in_bytes",
2906                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2907                 .trigger = mem_cgroup_reset,
2908                 .read_u64 = mem_cgroup_read,
2909         },
2910         {
2911                 .name = "memsw.limit_in_bytes",
2912                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2913                 .write_string = mem_cgroup_write,
2914                 .read_u64 = mem_cgroup_read,
2915         },
2916         {
2917                 .name = "memsw.failcnt",
2918                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2919                 .trigger = mem_cgroup_reset,
2920                 .read_u64 = mem_cgroup_read,
2921         },
2922 };
2923
2924 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2925 {
2926         if (!do_swap_account)
2927                 return 0;
2928         return cgroup_add_files(cont, ss, memsw_cgroup_files,
2929                                 ARRAY_SIZE(memsw_cgroup_files));
2930 };
2931 #else
2932 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2933 {
2934         return 0;
2935 }
2936 #endif
2937
2938 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2939 {
2940         struct mem_cgroup_per_node *pn;
2941         struct mem_cgroup_per_zone *mz;
2942         enum lru_list l;
2943         int zone, tmp = node;
2944         /*
2945          * This routine is called against possible nodes, but it is a BUG
2946          * to call kmalloc() against an offline node.
2947          *
2948          * TODO: this routine can waste a lot of memory on nodes which will
2949          *       never be onlined. It would be better to use a memory hotplug
2950          *       callback function.
2951          */
2952         if (!node_state(node, N_NORMAL_MEMORY))
2953                 tmp = -1;
2954         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2955         if (!pn)
2956                 return 1;
2957
2958         mem->info.nodeinfo[node] = pn;
2959         memset(pn, 0, sizeof(*pn));
2960
2961         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2962                 mz = &pn->zoneinfo[zone];
2963                 for_each_lru(l)
2964                         INIT_LIST_HEAD(&mz->lists[l]);
2965                 mz->usage_in_excess = 0;
2966                 mz->on_tree = false;
2967                 mz->mem = mem;
2968         }
2969         return 0;
2970 }
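
/*
 * Resulting layout (per the structures initialized above): LRU lists
 * are reached as mem->info.nodeinfo[node]->zoneinfo[zone].lists[lru],
 * so every memcg keeps its own per-node, per-zone LRUs mirroring the
 * global ones.
 */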

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
        kfree(mem->info.nodeinfo[node]);
}

static int mem_cgroup_size(void)
{
        int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
        return sizeof(struct mem_cgroup) + cpustat_size;
}
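
/*
 * Note on the size computation: the flexible cpustat[] array at the
 * end of struct mem_cgroup_stat gets one mem_cgroup_stat_cpu slot per
 * possible CPU, so with a large nr_cpu_ids the total can exceed
 * PAGE_SIZE — which is why mem_cgroup_alloc() below falls back to
 * vmalloc() for the big case.
 */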

static struct mem_cgroup *mem_cgroup_alloc(void)
{
        struct mem_cgroup *mem;
        int size = mem_cgroup_size();

        if (size < PAGE_SIZE)
                mem = kmalloc(size, GFP_KERNEL);
        else
                mem = vmalloc(size);

        if (mem)
                memset(mem, 0, size);
        return mem;
}

/*
 * When a mem_cgroup is destroyed, references to it from swap_cgroup
 * records can remain (scanning them all at force_empty would be too
 * costly...).
 *
 * Instead of clearing all those references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup
 * only when that count goes down to 0.
 *
 * Removal of the cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *mem)
{
        int node;

        mem_cgroup_remove_from_trees(mem);
        free_css_id(&mem_cgroup_subsys, &mem->css);

        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);

        if (mem_cgroup_size() < PAGE_SIZE)
                kfree(mem);
        else
                vfree(mem);
}

static void mem_cgroup_get(struct mem_cgroup *mem)
{
        atomic_inc(&mem->refcnt);
}

static void mem_cgroup_put(struct mem_cgroup *mem)
{
        if (atomic_dec_and_test(&mem->refcnt)) {
                struct mem_cgroup *parent = parent_mem_cgroup(mem);
                __mem_cgroup_free(mem);
                if (parent)
                        mem_cgroup_put(parent);
        }
}
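
/*
 * Refcount sketch: mem_cgroup_create() starts the count at 1 (dropped
 * by mem_cgroup_destroy() at rmdir time); swap records and, in
 * hierarchical mode, each child hold extra references.  Dropping the
 * last reference frees the memcg and then releases the reference
 * pinning its parent, so an unused subtree can unwind recursively.
 */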

/*
 * Returns the parent mem_cgroup in the memcg hierarchy.  This is NULL
 * unless use_hierarchy chained the res_counters at creation time (see
 * mem_cgroup_create() below).
 */
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
{
        if (!mem->res.parent)
                return NULL;
        return mem_cgroup_from_res_counter(mem->res.parent, res);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
        if (!mem_cgroup_disabled() && really_do_swap_account)
                do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif

static int mem_cgroup_soft_limit_tree_init(void)
{
        struct mem_cgroup_tree_per_node *rtpn;
        struct mem_cgroup_tree_per_zone *rtpz;
        int tmp, node, zone;

        for_each_node_state(node, N_POSSIBLE) {
                tmp = node;
                if (!node_state(node, N_NORMAL_MEMORY))
                        tmp = -1;
                rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
                if (!rtpn)
                        return 1;

                soft_limit_tree.rb_tree_per_node[node] = rtpn;

                for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                        rtpz = &rtpn->rb_tree_per_zone[zone];
                        rtpz->rb_root = RB_ROOT;
                        spin_lock_init(&rtpz->lock);
                }
        }
        return 0;
}
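
/*
 * The soft limit machinery thus gets one RB-tree per (node, zone)
 * pair, ordered by how far the memcgs on it exceed their soft limit
 * (the usage_in_excess field set up above), so reclaim can pick the
 * worst offender for a given zone without scanning every group.
 */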

static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
        struct mem_cgroup *mem, *parent;
        long error = -ENOMEM;
        int node;

        mem = mem_cgroup_alloc();
        if (!mem)
                return ERR_PTR(error);

        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;

        /* root ? */
        if (cont->parent == NULL) {
                enable_swap_cgroup();
                parent = NULL;
                root_mem_cgroup = mem;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;

        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                mem->use_hierarchy = parent->use_hierarchy;
        }

        if (parent && parent->use_hierarchy) {
                res_counter_init(&mem->res, &parent->res);
                res_counter_init(&mem->memsw, &parent->memsw);
                /*
                 * We increment the refcnt of the parent to ensure that
                 * we can safely access it on res_counter_charge/uncharge.
                 * This refcnt is decremented when freeing this mem_cgroup
                 * (see mem_cgroup_put).
                 */
                mem_cgroup_get(parent);
        } else {
                res_counter_init(&mem->res, NULL);
                res_counter_init(&mem->memsw, NULL);
        }
        mem->last_scanned_child = 0;
        spin_lock_init(&mem->reclaim_param_lock);

        if (parent)
                mem->swappiness = get_swappiness(parent);
        atomic_set(&mem->refcnt, 1);
        return &mem->css;
free_out:
        __mem_cgroup_free(mem);
        /* don't clobber the global root pointer when a child fails */
        if (cont->parent == NULL)
                root_mem_cgroup = NULL;
        return ERR_PTR(error);
}
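
/*
 * Lifecycle sketch (names per this file): mkdir of a memory cgroup
 * directory invokes mem_cgroup_create() and then mem_cgroup_populate();
 * rmdir first runs mem_cgroup_pre_destroy() to empty the group and
 * then mem_cgroup_destroy(), which drops the reference taken at
 * creation.  E.g., with the hypothetical mount point from above:
 *
 *   # mkdir /cgroup/memory/group0       -> create + populate
 *   # rmdir /cgroup/memory/group0       -> pre_destroy + destroy
 */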

static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
                                        struct cgroup *cont)
{
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

        return mem_cgroup_force_empty(mem, false);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

        mem_cgroup_put(mem);
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        int ret;

        ret = cgroup_add_files(cont, ss, mem_cgroup_files,
                                ARRAY_SIZE(mem_cgroup_files));

        if (!ret)
                ret = register_memsw_files(cont, ss);
        return ret;
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *cont,
                                struct cgroup *old_cont,
                                struct task_struct *p,
                                bool threadgroup)
{
        mutex_lock(&memcg_tasklist);
        /*
         * FIXME: charges of this task should be moved from the old
         * memcg to the new one, but that is still on the TODO list.
         */
        mutex_unlock(&memcg_tasklist);
}
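
/*
 * Note that the lock/unlock pair above is not a no-op even though no
 * charges are moved yet: taking memcg_tasklist makes the attach wait
 * for any concurrent holder of that mutex elsewhere in this file.
 */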

struct cgroup_subsys mem_cgroup_subsys = {
        .name = "memory",
        .subsys_id = mem_cgroup_subsys_id,
        .create = mem_cgroup_create,
        .pre_destroy = mem_cgroup_pre_destroy,
        .destroy = mem_cgroup_destroy,
        .populate = mem_cgroup_populate,
        .attach = mem_cgroup_move_task,
        .early_init = 0,
        .use_id = 1,
};
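
/*
 * use_id = 1 asks the cgroup core to allocate a css_id for every memcg
 * (released in __mem_cgroup_free() via free_css_id() above), and
 * early_init = 0 defers initialization until the core subsystems are
 * up.
 */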

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static int __init disable_swap_account(char *s)
{
        really_do_swap_account = 0;
        return 1;
}
__setup("noswapaccount", disable_swap_account);
#endif
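
/*
 * Usage sketch: booting with the kernel parameter "noswapaccount"
 * clears really_do_swap_account, so enable_swap_cgroup() leaves
 * do_swap_account at 0 and the memsw.* files are never registered,
 * even when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is built in.
 */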