#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)
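
/*
 * Without CONFIG_SPARSEMEM, each node keeps a single page_cgroup array in
 * its pgdat (node_page_cgroup), and lookup_page_cgroup() indexes it by
 * (pfn - node_start_pfn).
 */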
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;
	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
		" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_FLAT_NODE_MEM_MAP */

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_cgroup)
		return NULL;
#endif
	return section->page_cgroup + pfn;
}
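
/*
 * Allocate a section's worth of page_cgroup entries, preferring memory on
 * the given node: try alloc_pages_exact_nid() first, then fall back to
 * vzalloc_node() if the node has memory, and finally plain vzalloc().
 */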
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}
#endif

static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_cgroup *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);
	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
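
/*
 * Note that section->page_cgroup stores the array base biased by the
 * section-aligned start pfn, so lookup_page_cgroup() above can simply add
 * the raw pfn to it.
 */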

#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online__pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can be overlapping.
			 * We know some archs can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);

struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short id;
};

#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)
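
/*
 * Each swap slot maps to one struct swap_cgroup. For a given swap offset,
 * idx = offset / SC_PER_PAGE picks the backing page in ctrl->map[] and
 * pos = offset & SC_POS_MASK picks the slot within it. For example, with
 * 4KiB pages and a 2-byte swap_cgroup, SC_PER_PAGE is 2048, so offset 5000
 * lands in map[2] at slot 904.
 */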

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), this is accessed directly from swap.
 *
 * This means,
 *  - we have no race in "exchange" when we're accessed via SwapCache because
 *    SwapCache (and its swp_entry) is under lock.
 *  - When called via swap_free(), there is no user of this entry and no race.
 * Then, we don't need a lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */
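
/*
 * In line with the above, lookup_swap_cgroup() reads the id without taking
 * any lock, while swap_cgroup_record() and swap_cgroup_cmpxchg() serialize
 * updates with ctrl->lock.
 */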

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup's css ID to be recorded
 *
 * Returns old value at success, 0 at failure.
 * (Of course, old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns CSS ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	return sc->id;
}

int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];