1 /*
2  *  linux/mm/vmstat.c
3  *
4  *  Manages VM statistics
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  zoned VM statistics
8  *  Copyright (C) 2006 Silicon Graphics, Inc.,
9  *              Christoph Lameter <christoph@lameter.com>
10  *  Copyright (C) 2008-2014 Christoph Lameter
11  */
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/err.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/cpu.h>
18 #include <linux/cpumask.h>
19 #include <linux/vmstat.h>
20 #include <linux/proc_fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/debugfs.h>
23 #include <linux/sched.h>
24 #include <linux/math64.h>
25 #include <linux/writeback.h>
26 #include <linux/compaction.h>
27 #include <linux/mm_inline.h>
28 #include <linux/page_ext.h>
29 #include <linux/page_owner.h>
30
31 #include "internal.h"
32
33 #ifdef CONFIG_VM_EVENT_COUNTERS
34 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
35 EXPORT_PER_CPU_SYMBOL(vm_event_states);
36
37 static void sum_vm_events(unsigned long *ret)
38 {
39         int cpu;
40         int i;
41
42         memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
43
44         for_each_online_cpu(cpu) {
45                 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
46
47                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
48                         ret[i] += this->event[i];
49         }
50 }
51
52 /*
53  * Accumulate the vm event counters across all CPUs.
54  * The result is unavoidably approximate - it can change
55  * during and after execution of this function.
56  */
57 void all_vm_events(unsigned long *ret)
58 {
59         get_online_cpus();
60         sum_vm_events(ret);
61         put_online_cpus();
62 }
63 EXPORT_SYMBOL_GPL(all_vm_events);
64
65 /*
66  * Fold the foreign cpu events into our own.
67  *
68  * This is adding to the events on one processor
69  * but keeps the global counts constant.
70  */
71 void vm_events_fold_cpu(int cpu)
72 {
73         struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
74         int i;
75
76         for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
77                 count_vm_events(i, fold_state->event[i]);
78                 fold_state->event[i] = 0;
79         }
80 }
81
82 #endif /* CONFIG_VM_EVENT_COUNTERS */
83
84 /*
85  * Manage combined zone based / global counters
86  *
87  * vm_stat contains the global counters
88  */
89 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90 EXPORT_SYMBOL(vm_stat);
91
92 #ifdef CONFIG_SMP
93
94 int calculate_pressure_threshold(struct zone *zone)
95 {
96         int threshold;
97         int watermark_distance;
98
99         /*
100          * As vmstats are not up to date, there is drift between the estimated
101          * and real values. For high thresholds and a high number of CPUs, it
102          * is possible for the min watermark to be breached while the estimated
103          * value looks fine. The pressure threshold is a reduced value such
104          * that even the maximum amount of drift will not accidentally breach
105          * the min watermark
106          */
107         watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
108         threshold = max(1, (int)(watermark_distance / num_online_cpus()));
109
110         /*
111          * Maximum threshold is 125
112          */
113         threshold = min(125, threshold);
114
115         return threshold;
116 }
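/*
 * Illustrative worked example (numbers assumed, not from this file):
 * with a low-min watermark gap of 1024 pages and 16 online CPUs the
 * pressure threshold becomes max(1, 1024 / 16) = 64, well under the
 * 125 cap. The combined per-cpu drift is then at most 16 * 64 = 1024
 * pages, so an NR_FREE_PAGES estimate that still reads at or above the
 * low watermark cannot in reality have fallen below the min watermark.
 */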
117
118 int calculate_normal_threshold(struct zone *zone)
119 {
120         int threshold;
121         int mem;        /* memory in 128 MB units */
122
123         /*
124          * The threshold scales with the number of processors and the amount
125          * of memory per zone. More memory means that we can defer updates for
126          * longer, while more processors could lead to more contention.
127          * fls() is used to have a cheap way of logarithmic scaling.
128          *
129          * Some sample thresholds:
130          *
131          * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
132          * ------------------------------------------------------------------
133          * 8            1               1       0.9-1 GB        4
134          * 16           2               2       0.9-1 GB        4
135          * 20           2               2       1-2 GB          5
136          * 24           2               2       2-4 GB          6
137          * 28           2               2       4-8 GB          7
138          * 32           2               2       8-16 GB         8
139          * 4            2               2       <128M           1
140          * 30           4               3       2-4 GB          5
141          * 48           4               3       8-16 GB         8
142          * 32           8               4       1-2 GB          4
143          * 32           8               4       0.9-1GB         4
144          * 10           16              5       <128M           1
145          * 40           16              5       900M            4
146          * 70           64              7       2-4 GB          5
147          * 84           64              7       4-8 GB          6
148          * 108          512             9       4-8 GB          6
149          * 125          1024            10      8-16 GB         8
150          * 125          1024            10      16-32 GB        9
151          */
152
153         mem = zone->managed_pages >> (27 - PAGE_SHIFT);
154
155         threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
156
157         /*
158          * Maximum threshold is 125
159          */
160         threshold = min(125, threshold);
161
162         return threshold;
163 }
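/*
 * Illustrative worked example (assumed configuration): a 1 GB zone
 * gives mem = 1024 MB / 128 MB = 8, and with 4 online CPUs
 * (fls(4) = 3, fls(8) = 4) the formula yields
 * threshold = 2 * 3 * (1 + 4) = 30, which the min(125, ...) clamp
 * leaves untouched.
 */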
164
165 /*
166  * Refresh the thresholds for each zone.
167  */
168 void refresh_zone_stat_thresholds(void)
169 {
170         struct zone *zone;
171         int cpu;
172         int threshold;
173
174         for_each_populated_zone(zone) {
175                 unsigned long max_drift, tolerate_drift;
176
177                 threshold = calculate_normal_threshold(zone);
178
179                 for_each_online_cpu(cpu)
180                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
181                                                         = threshold;
182
183                 /*
184                  * Only set percpu_drift_mark if there is a danger that
185                  * NR_FREE_PAGES reports the low watermark is ok when in fact
186                  * the min watermark could be breached by an allocation
187                  */
188                 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
189                 max_drift = num_online_cpus() * threshold;
190                 if (max_drift > tolerate_drift)
191                         zone->percpu_drift_mark = high_wmark_pages(zone) +
192                                         max_drift;
193         }
194 }
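/*
 * Illustrative worked example (numbers assumed): with 64 online CPUs
 * and a normal threshold of 125, the per-cpu diffs can collectively
 * hide up to 64 * 125 = 8000 pages. If the low-min watermark gap is
 * only 1024 pages, that drift could mask a breach of the min
 * watermark, so percpu_drift_mark is set to the high watermark plus
 * 8000 pages; watermark checks elsewhere in mm treat free page
 * estimates below that mark as untrustworthy and fall back to a more
 * exact count.
 */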
195
196 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
197                                 int (*calculate_pressure)(struct zone *))
198 {
199         struct zone *zone;
200         int cpu;
201         int threshold;
202         int i;
203
204         for (i = 0; i < pgdat->nr_zones; i++) {
205                 zone = &pgdat->node_zones[i];
206                 if (!zone->percpu_drift_mark)
207                         continue;
208
209                 threshold = (*calculate_pressure)(zone);
210                 for_each_online_cpu(cpu)
211                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
212                                                         = threshold;
213         }
214 }
215
216 /*
217  * For use when we know that interrupts are disabled,
218  * or when we know that preemption is disabled and that
219  * particular counter cannot be updated from interrupt context.
220  */
221 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
222                                 int delta)
223 {
224         struct per_cpu_pageset __percpu *pcp = zone->pageset;
225         s8 __percpu *p = pcp->vm_stat_diff + item;
226         long x;
227         long t;
228
229         x = delta + __this_cpu_read(*p);
230
231         t = __this_cpu_read(pcp->stat_threshold);
232
233         if (unlikely(x > t || x < -t)) {
234                 zone_page_state_add(x, zone, item);
235                 x = 0;
236         }
237         __this_cpu_write(*p, x);
238 }
239 EXPORT_SYMBOL(__mod_zone_page_state);
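/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): the __-prefixed variant is only safe when the caller already
 * excludes interrupts, or preemption for counters never touched from
 * interrupt context, e.g.
 *
 *	local_irq_save(flags);
 *	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr_pages);
 *	local_irq_restore(flags);
 *
 * which is essentially what the irq-disabling mod_zone_page_state()
 * fallback below does when cmpxchg_local is not available.
 */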
240
241 /*
242  * Optimized increment and decrement functions.
243  *
244  * These are only for a single page and therefore can take a struct page *
245  * argument instead of struct zone *. This allows the inclusion of the code
246  * generated for page_zone(page) into the optimized functions.
247  *
248  * No overflow check is necessary and therefore the differential can be
249  * incremented or decremented in place which may allow the compilers to
250  * generate better code.
251  * The increment or decrement is known and therefore one boundary check can
252  * be omitted.
253  *
254  * NOTE: These functions are very performance sensitive. Change only
255  * with care.
256  *
257  * Some processors have inc/dec instructions that are atomic vs an interrupt.
258  * However, the code must first determine the differential location in a zone
259  * based on the processor number and then inc/dec the counter. There is no
260  * guarantee without disabling preemption that the processor will not change
261  * in between and therefore the atomicity vs. interrupt cannot be exploited
262  * in a useful way here.
263  */
264 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
265 {
266         struct per_cpu_pageset __percpu *pcp = zone->pageset;
267         s8 __percpu *p = pcp->vm_stat_diff + item;
268         s8 v, t;
269
270         v = __this_cpu_inc_return(*p);
271         t = __this_cpu_read(pcp->stat_threshold);
272         if (unlikely(v > t)) {
273                 s8 overstep = t >> 1;
274
275                 zone_page_state_add(v + overstep, zone, item);
276                 __this_cpu_write(*p, -overstep);
277         }
278 }
279
280 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
281 {
282         __inc_zone_state(page_zone(page), item);
283 }
284 EXPORT_SYMBOL(__inc_zone_page_state);
285
286 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
287 {
288         struct per_cpu_pageset __percpu *pcp = zone->pageset;
289         s8 __percpu *p = pcp->vm_stat_diff + item;
290         s8 v, t;
291
292         v = __this_cpu_dec_return(*p);
293         t = __this_cpu_read(pcp->stat_threshold);
294         if (unlikely(v < -t)) {
295                 s8 overstep = t >> 1;
296
297                 zone_page_state_add(v - overstep, zone, item);
298                 __this_cpu_write(*p, overstep);
299         }
300 }
301
302 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
303 {
304         __dec_zone_state(page_zone(page), item);
305 }
306 EXPORT_SYMBOL(__dec_zone_page_state);
307
308 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
309 /*
310  * If we have cmpxchg_local support then we do not need to incur the overhead
311  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
312  *
313  * mod_state() modifies the zone counter state through atomic per cpu
314  * operations.
315  *
316  * Overstep mode specifies how overstep should be handled:
317  *     0       No overstepping
318  *     1       Overstepping half of threshold
319  *     -1      Overstepping minus half of threshold
320  */
321 static inline void mod_state(struct zone *zone,
322        enum zone_stat_item item, int delta, int overstep_mode)
323 {
324         struct per_cpu_pageset __percpu *pcp = zone->pageset;
325         s8 __percpu *p = pcp->vm_stat_diff + item;
326         long o, n, t, z;
327
328         do {
329                 z = 0;  /* overflow to zone counters */
330
331                 /*
332                  * The fetching of the stat_threshold is racy. We may apply
333                  * a counter threshold to the wrong cpu if we get
334                  * rescheduled while executing here. However, the next
335                  * counter update will apply the threshold again and
336                  * therefore bring the counter under the threshold again.
337                  *
338                  * Most of the time the thresholds are the same anyway
339                  * for all cpus in a zone.
340                  */
341                 t = this_cpu_read(pcp->stat_threshold);
342
343                 o = this_cpu_read(*p);
344                 n = delta + o;
345
346                 if (n > t || n < -t) {
347                         int os = overstep_mode * (t >> 1);
348
349                         /* Overflow must be added to zone counters */
350                         z = n + os;
351                         n = -os;
352                 }
353         } while (this_cpu_cmpxchg(*p, o, n) != o);
354
355         if (z)
356                 zone_page_state_add(z, zone, item);
357 }
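/*
 * Illustrative worked example (values assumed): with a threshold of
 * t = 32 and overstep_mode = 1, a per-cpu diff that would reach 33
 * oversteps by t >> 1 = 16, so z = 33 + 16 = 49 pages are folded into
 * the zone and global counters while the per-cpu diff is reset to -16,
 * leaving headroom before the next fold in either direction.
 */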
358
359 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
360                                         int delta)
361 {
362         mod_state(zone, item, delta, 0);
363 }
364 EXPORT_SYMBOL(mod_zone_page_state);
365
366 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
367 {
368         mod_state(zone, item, 1, 1);
369 }
370
371 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
372 {
373         mod_state(page_zone(page), item, 1, 1);
374 }
375 EXPORT_SYMBOL(inc_zone_page_state);
376
377 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
378 {
379         mod_state(page_zone(page), item, -1, -1);
380 }
381 EXPORT_SYMBOL(dec_zone_page_state);
382 #else
383 /*
384  * Use interrupt disable to serialize counter updates
385  */
386 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
387                                         int delta)
388 {
389         unsigned long flags;
390
391         local_irq_save(flags);
392         __mod_zone_page_state(zone, item, delta);
393         local_irq_restore(flags);
394 }
395 EXPORT_SYMBOL(mod_zone_page_state);
396
397 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
398 {
399         unsigned long flags;
400
401         local_irq_save(flags);
402         __inc_zone_state(zone, item);
403         local_irq_restore(flags);
404 }
405
406 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
407 {
408         unsigned long flags;
409         struct zone *zone;
410
411         zone = page_zone(page);
412         local_irq_save(flags);
413         __inc_zone_state(zone, item);
414         local_irq_restore(flags);
415 }
416 EXPORT_SYMBOL(inc_zone_page_state);
417
418 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
419 {
420         unsigned long flags;
421
422         local_irq_save(flags);
423         __dec_zone_page_state(page, item);
424         local_irq_restore(flags);
425 }
426 EXPORT_SYMBOL(dec_zone_page_state);
427 #endif
428
429
430 /*
431  * Fold a differential into the global counters.
432  * Returns the number of counters updated.
433  */
434 static int fold_diff(int *diff)
435 {
436         int i;
437         int changes = 0;
438
439         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
440                 if (diff[i]) {
441                         atomic_long_add(diff[i], &vm_stat[i]);
442                         changes++;
443         }
444         return changes;
445 }
446
447 /*
448  * Update the zone counters for the current cpu.
449  *
450  * Note that refresh_cpu_vm_stats strives to only access
451  * node local memory. The per cpu pagesets on remote zones are placed
452  * in the memory local to the processor using that pageset. So the
453  * loop over all zones will access a series of cachelines local to
454  * the processor.
455  *
456  * The call to zone_page_state_add updates the cachelines with the
457  * statistics in the remote zone struct as well as the global cachelines
458  * with the global counters. These could cause remote node cache line
459  * bouncing and will have to be only done when necessary.
460  *
461  * The function returns the number of global counters updated.
462  */
463 static int refresh_cpu_vm_stats(void)
464 {
465         struct zone *zone;
466         int i;
467         int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
468         int changes = 0;
469
470         for_each_populated_zone(zone) {
471                 struct per_cpu_pageset __percpu *p = zone->pageset;
472
473                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
474                         int v;
475
476                         v = this_cpu_xchg(p->vm_stat_diff[i], 0);
477                         if (v) {
478
479                                 atomic_long_add(v, &zone->vm_stat[i]);
480                                 global_diff[i] += v;
481 #ifdef CONFIG_NUMA
482                                 /* 3 seconds idle till flush */
483                                 __this_cpu_write(p->expire, 3);
484 #endif
485                         }
486                 }
487                 cond_resched();
488 #ifdef CONFIG_NUMA
489                 /*
490                  * Deal with draining the remote pageset of this
491                  * processor
492                  *
493                  * Check if there are pages remaining in this pageset;
494                  * if not, there is nothing to expire.
495                  */
496                 if (!__this_cpu_read(p->expire) ||
497                                !__this_cpu_read(p->pcp.count))
498                         continue;
499
500                 /*
501                  * We never drain zones local to this processor.
502                  */
503                 if (zone_to_nid(zone) == numa_node_id()) {
504                         __this_cpu_write(p->expire, 0);
505                         continue;
506                 }
507
508                 if (__this_cpu_dec_return(p->expire))
509                         continue;
510
511                 if (__this_cpu_read(p->pcp.count)) {
512                         drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
513                         changes++;
514                 }
515 #endif
516         }
517         changes += fold_diff(global_diff);
518         return changes;
519 }
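/*
 * Timing sketch (approximate, derived from the code above): p->expire
 * is re-armed to 3 whenever this cpu updates a counter of a remote
 * zone and is decremented once per refresh_cpu_vm_stats() pass, so a
 * remote pageset that then stays idle for roughly three stat-refresh
 * intervals has its cached pcp pages handed back via
 * drain_zone_pages().
 */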
520
521 /*
522  * Fold the data for an offline cpu into the global array.
523  * There cannot be any access by the offline cpu and therefore
524  * synchronization is simplified.
525  */
526 void cpu_vm_stats_fold(int cpu)
527 {
528         struct zone *zone;
529         int i;
530         int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
531
532         for_each_populated_zone(zone) {
533                 struct per_cpu_pageset *p;
534
535                 p = per_cpu_ptr(zone->pageset, cpu);
536
537                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
538                         if (p->vm_stat_diff[i]) {
539                                 int v;
540
541                                 v = p->vm_stat_diff[i];
542                                 p->vm_stat_diff[i] = 0;
543                                 atomic_long_add(v, &zone->vm_stat[i]);
544                                 global_diff[i] += v;
545                         }
546         }
547
548         fold_diff(global_diff);
549 }
550
551 /*
552  * This is only called if !populated_zone(zone), which implies no other users of
553  * pset->vm_stat_diff[] exist.
554  */
555 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
556 {
557         int i;
558
559         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
560                 if (pset->vm_stat_diff[i]) {
561                         int v = pset->vm_stat_diff[i];
562                         pset->vm_stat_diff[i] = 0;
563                         atomic_long_add(v, &zone->vm_stat[i]);
564                         atomic_long_add(v, &vm_stat[i]);
565                 }
566 }
567 #endif
568
569 #ifdef CONFIG_NUMA
570 /*
571  * zonelist = the list of zones passed to the allocator
572  * z        = the zone from which the allocation occurred.
573  *
574  * Must be called with interrupts disabled.
575  *
576  * When __GFP_OTHER_NODE is set assume the node of the preferred
577  * zone is the local node. This is useful for daemons that allocate
578  * memory on behalf of other processes.
579  */
580 void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
581 {
582         if (z->zone_pgdat == preferred_zone->zone_pgdat) {
583                 __inc_zone_state(z, NUMA_HIT);
584         } else {
585                 __inc_zone_state(z, NUMA_MISS);
586                 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
587         }
588         if (z->node == ((flags & __GFP_OTHER_NODE) ?
589                         preferred_zone->node : numa_node_id()))
590                 __inc_zone_state(z, NUMA_LOCAL);
591         else
592                 __inc_zone_state(z, NUMA_OTHER);
593 }
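/*
 * Illustrative example (assumed scenario): a task running on node 0
 * whose preferred zone is on node 0, but whose pages end up coming
 * from a node 1 zone, bumps NUMA_MISS on the serving node 1 zone and
 * NUMA_FOREIGN on the preferred node 0 zone; since node 1 is not the
 * allocating CPU's local node, NUMA_OTHER is bumped on the serving
 * zone as well.
 */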
594
595 /*
596  * Determine the per node value of a stat item.
597  */
598 unsigned long node_page_state(int node, enum zone_stat_item item)
599 {
600         struct zone *zones = NODE_DATA(node)->node_zones;
601
602         return
603 #ifdef CONFIG_ZONE_DMA
604                 zone_page_state(&zones[ZONE_DMA], item) +
605 #endif
606 #ifdef CONFIG_ZONE_DMA32
607                 zone_page_state(&zones[ZONE_DMA32], item) +
608 #endif
609 #ifdef CONFIG_HIGHMEM
610                 zone_page_state(&zones[ZONE_HIGHMEM], item) +
611 #endif
612                 zone_page_state(&zones[ZONE_NORMAL], item) +
613                 zone_page_state(&zones[ZONE_MOVABLE], item);
614 }
615
616 #endif
617
618 #ifdef CONFIG_COMPACTION
619
620 struct contig_page_info {
621         unsigned long free_pages;
622         unsigned long free_blocks_total;
623         unsigned long free_blocks_suitable;
624 };
625
626 /*
627  * Calculate the number of free pages in a zone, how many contiguous
628  * pages are free and how many are large enough to satisfy an allocation of
629  * the target size. Note that this function makes no attempt to estimate
630  * how many suitable free blocks there *might* be if MOVABLE pages were
631  * migrated. Calculating that is possible, but expensive and can be
632  * figured out from userspace.
633  */
634 static void fill_contig_page_info(struct zone *zone,
635                                 unsigned int suitable_order,
636                                 struct contig_page_info *info)
637 {
638         unsigned int order;
639
640         info->free_pages = 0;
641         info->free_blocks_total = 0;
642         info->free_blocks_suitable = 0;
643
644         for (order = 0; order < MAX_ORDER; order++) {
645                 unsigned long blocks;
646
647                 /* Count number of free blocks */
648                 blocks = zone->free_area[order].nr_free;
649                 info->free_blocks_total += blocks;
650
651                 /* Count free base pages */
652                 info->free_pages += blocks << order;
653
654                 /* Count the suitable free blocks */
655                 if (order >= suitable_order)
656                         info->free_blocks_suitable += blocks <<
657                                                 (order - suitable_order);
658         }
659 }
660
661 /*
662  * A fragmentation index only makes sense if an allocation of a requested
663  * size would fail. If that is true, the fragmentation index indicates
664  * whether external fragmentation or a lack of memory was the problem.
665  * The value can be used to determine if page reclaim or compaction
666  * should be used
667  */
668 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
669 {
670         unsigned long requested = 1UL << order;
671
672         if (!info->free_blocks_total)
673                 return 0;
674
675         /* Fragmentation index only makes sense when a request would fail */
676         if (info->free_blocks_suitable)
677                 return -1000;
678
679         /*
680          * Index is between 0 and 1 so return within 3 decimal places
681          *
682          * 0 => allocation would fail due to lack of memory
683          * 1 => allocation would fail due to fragmentation
684          */
685         return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
686 }
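/*
 * Illustrative worked example (numbers assumed): for an order-3
 * request (8 pages) against a zone whose 1000 free pages are all in
 * order-0 blocks, free_blocks_total = 1000 and free_blocks_suitable
 * = 0, so the index is
 * 1000 - (1000 + 1000 * 1000 / 8) / 1000 = 1000 - 126 = 874,
 * i.e. 0.874: the failure is caused mostly by fragmentation rather
 * than by a lack of memory.
 */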
687
688 /* Same as __fragmentation_index() but allocates contig_page_info on the stack */
689 int fragmentation_index(struct zone *zone, unsigned int order)
690 {
691         struct contig_page_info info;
692
693         fill_contig_page_info(zone, order, &info);
694         return __fragmentation_index(order, &info);
695 }
696 #endif
697
698 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
699 #ifdef CONFIG_ZONE_DMA
700 #define TEXT_FOR_DMA(xx) xx "_dma",
701 #else
702 #define TEXT_FOR_DMA(xx)
703 #endif
704
705 #ifdef CONFIG_ZONE_DMA32
706 #define TEXT_FOR_DMA32(xx) xx "_dma32",
707 #else
708 #define TEXT_FOR_DMA32(xx)
709 #endif
710
711 #ifdef CONFIG_HIGHMEM
712 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
713 #else
714 #define TEXT_FOR_HIGHMEM(xx)
715 #endif
716
717 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
718                                         TEXT_FOR_HIGHMEM(xx) xx "_movable",
719
720 const char * const vmstat_text[] = {
721         /* enum zone_stat_item counters */
722         "nr_free_pages",
723         "nr_alloc_batch",
724         "nr_inactive_anon",
725         "nr_active_anon",
726         "nr_inactive_file",
727         "nr_active_file",
728         "nr_unevictable",
729         "nr_mlock",
730         "nr_anon_pages",
731         "nr_mapped",
732         "nr_file_pages",
733         "nr_dirty",
734         "nr_writeback",
735         "nr_slab_reclaimable",
736         "nr_slab_unreclaimable",
737         "nr_page_table_pages",
738         "nr_kernel_stack",
739         "nr_unstable",
740         "nr_bounce",
741         "nr_vmscan_write",
742         "nr_vmscan_immediate_reclaim",
743         "nr_writeback_temp",
744         "nr_isolated_anon",
745         "nr_isolated_file",
746         "nr_shmem",
747         "nr_dirtied",
748         "nr_written",
749         "nr_pages_scanned",
750
751 #ifdef CONFIG_NUMA
752         "numa_hit",
753         "numa_miss",
754         "numa_foreign",
755         "numa_interleave",
756         "numa_local",
757         "numa_other",
758 #endif
759         "workingset_refault",
760         "workingset_activate",
761         "workingset_nodereclaim",
762         "nr_anon_transparent_hugepages",
763         "nr_free_cma",
764
765         /* enum writeback_stat_item counters */
766         "nr_dirty_threshold",
767         "nr_dirty_background_threshold",
768
769 #ifdef CONFIG_VM_EVENT_COUNTERS
770         /* enum vm_event_item counters */
771         "pgpgin",
772         "pgpgout",
773         "pswpin",
774         "pswpout",
775
776         TEXTS_FOR_ZONES("pgalloc")
777
778         "pgfree",
779         "pgactivate",
780         "pgdeactivate",
781
782         "pgfault",
783         "pgmajfault",
784         "pglazyfreed",
785
786         TEXTS_FOR_ZONES("pgrefill")
787         TEXTS_FOR_ZONES("pgsteal_kswapd")
788         TEXTS_FOR_ZONES("pgsteal_direct")
789         TEXTS_FOR_ZONES("pgscan_kswapd")
790         TEXTS_FOR_ZONES("pgscan_direct")
791         "pgscan_direct_throttle",
792
793 #ifdef CONFIG_NUMA
794         "zone_reclaim_failed",
795 #endif
796         "pginodesteal",
797         "slabs_scanned",
798         "kswapd_inodesteal",
799         "kswapd_low_wmark_hit_quickly",
800         "kswapd_high_wmark_hit_quickly",
801         "pageoutrun",
802         "allocstall",
803
804         "pgrotated",
805
806         "drop_pagecache",
807         "drop_slab",
808
809 #ifdef CONFIG_NUMA_BALANCING
810         "numa_pte_updates",
811         "numa_huge_pte_updates",
812         "numa_hint_faults",
813         "numa_hint_faults_local",
814         "numa_pages_migrated",
815 #endif
816 #ifdef CONFIG_MIGRATION
817         "pgmigrate_success",
818         "pgmigrate_fail",
819 #endif
820 #ifdef CONFIG_COMPACTION
821         "compact_migrate_scanned",
822         "compact_free_scanned",
823         "compact_isolated",
824         "compact_stall",
825         "compact_fail",
826         "compact_success",
827 #endif
828
829 #ifdef CONFIG_HUGETLB_PAGE
830         "htlb_buddy_alloc_success",
831         "htlb_buddy_alloc_fail",
832 #endif
833         "unevictable_pgs_culled",
834         "unevictable_pgs_scanned",
835         "unevictable_pgs_rescued",
836         "unevictable_pgs_mlocked",
837         "unevictable_pgs_munlocked",
838         "unevictable_pgs_cleared",
839         "unevictable_pgs_stranded",
840
841 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
842         "thp_fault_alloc",
843         "thp_fault_fallback",
844         "thp_collapse_alloc",
845         "thp_collapse_alloc_failed",
846         "thp_split_page",
847         "thp_split_page_failed",
848         "thp_split_pmd",
849         "thp_zero_page_alloc",
850         "thp_zero_page_alloc_failed",
851 #endif
852 #ifdef CONFIG_MEMORY_BALLOON
853         "balloon_inflate",
854         "balloon_deflate",
855 #ifdef CONFIG_BALLOON_COMPACTION
856         "balloon_migrate",
857 #endif
858 #endif /* CONFIG_MEMORY_BALLOON */
859 #ifdef CONFIG_DEBUG_TLBFLUSH
860 #ifdef CONFIG_SMP
861         "nr_tlb_remote_flush",
862         "nr_tlb_remote_flush_received",
863 #endif /* CONFIG_SMP */
864         "nr_tlb_local_flush_all",
865         "nr_tlb_local_flush_one",
866 #endif /* CONFIG_DEBUG_TLBFLUSH */
867
868 #ifdef CONFIG_DEBUG_VM_VMACACHE
869         "vmacache_find_calls",
870         "vmacache_find_hits",
871         "vmacache_full_flushes",
872 #endif
873 #endif /* CONFIG_VM_EVENT_COUNTERS */
874 };
875 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
876
877
878 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
879      defined(CONFIG_PROC_FS)
880 static void *frag_start(struct seq_file *m, loff_t *pos)
881 {
882         pg_data_t *pgdat;
883         loff_t node = *pos;
884
885         for (pgdat = first_online_pgdat();
886              pgdat && node;
887              pgdat = next_online_pgdat(pgdat))
888                 --node;
889
890         return pgdat;
891 }
892
893 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
894 {
895         pg_data_t *pgdat = (pg_data_t *)arg;
896
897         (*pos)++;
898         return next_online_pgdat(pgdat);
899 }
900
901 static void frag_stop(struct seq_file *m, void *arg)
902 {
903 }
904
905 /* Walk all the zones in a node and print using a callback */
906 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
907                 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
908 {
909         struct zone *zone;
910         struct zone *node_zones = pgdat->node_zones;
911         unsigned long flags;
912
913         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
914                 if (!populated_zone(zone))
915                         continue;
916
917                 spin_lock_irqsave(&zone->lock, flags);
918                 print(m, pgdat, zone);
919                 spin_unlock_irqrestore(&zone->lock, flags);
920         }
921 }
922 #endif
923
924 #ifdef CONFIG_PROC_FS
925 static char * const migratetype_names[MIGRATE_TYPES] = {
926         "Unmovable",
927         "Reclaimable",
928         "Movable",
929         "HighAtomic",
930 #ifdef CONFIG_CMA
931         "CMA",
932 #endif
933 #ifdef CONFIG_MEMORY_ISOLATION
934         "Isolate",
935 #endif
936 };
937
938 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
939                                                 struct zone *zone)
940 {
941         int order;
942
943         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
944         for (order = 0; order < MAX_ORDER; ++order)
945                 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
946         seq_putc(m, '\n');
947 }
948
949 /*
950  * This walks the free areas for each zone.
951  */
952 static int frag_show(struct seq_file *m, void *arg)
953 {
954         pg_data_t *pgdat = (pg_data_t *)arg;
955         walk_zones_in_node(m, pgdat, frag_show_print);
956         return 0;
957 }
958
959 static void pagetypeinfo_showfree_print(struct seq_file *m,
960                                         pg_data_t *pgdat, struct zone *zone)
961 {
962         int order, mtype;
963
964         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
965                 seq_printf(m, "Node %4d, zone %8s, type %12s ",
966                                         pgdat->node_id,
967                                         zone->name,
968                                         migratetype_names[mtype]);
969                 for (order = 0; order < MAX_ORDER; ++order) {
970                         unsigned long freecount = 0;
971                         struct free_area *area;
972                         struct list_head *curr;
973
974                         area = &(zone->free_area[order]);
975
976                         list_for_each(curr, &area->free_list[mtype])
977                                 freecount++;
978                         seq_printf(m, "%6lu ", freecount);
979                 }
980                 seq_putc(m, '\n');
981         }
982 }
983
984 /* Print out the free pages at each order for each migratetype */
985 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
986 {
987         int order;
988         pg_data_t *pgdat = (pg_data_t *)arg;
989
990         /* Print header */
991         seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
992         for (order = 0; order < MAX_ORDER; ++order)
993                 seq_printf(m, "%6d ", order);
994         seq_putc(m, '\n');
995
996         walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
997
998         return 0;
999 }
1000
1001 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1002                                         pg_data_t *pgdat, struct zone *zone)
1003 {
1004         int mtype;
1005         unsigned long pfn;
1006         unsigned long start_pfn = zone->zone_start_pfn;
1007         unsigned long end_pfn = zone_end_pfn(zone);
1008         unsigned long count[MIGRATE_TYPES] = { 0, };
1009
1010         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1011                 struct page *page;
1012
1013                 if (!pfn_valid(pfn))
1014                         continue;
1015
1016                 page = pfn_to_page(pfn);
1017
1018                 /* Watch for unexpected holes punched in the memmap */
1019                 if (!memmap_valid_within(pfn, page, zone))
1020                         continue;
1021
1022                 mtype = get_pageblock_migratetype(page);
1023
1024                 if (mtype < MIGRATE_TYPES)
1025                         count[mtype]++;
1026         }
1027
1028         /* Print counts */
1029         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1030         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1031                 seq_printf(m, "%12lu ", count[mtype]);
1032         seq_putc(m, '\n');
1033 }
1034
1035 /* Print out the number of pageblocks for each migratetype */
1036 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1037 {
1038         int mtype;
1039         pg_data_t *pgdat = (pg_data_t *)arg;
1040
1041         seq_printf(m, "\n%-23s", "Number of blocks type ");
1042         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1043                 seq_printf(m, "%12s ", migratetype_names[mtype]);
1044         seq_putc(m, '\n');
1045         walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1046
1047         return 0;
1048 }
1049
1050 #ifdef CONFIG_PAGE_OWNER
1051 static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
1052                                                         pg_data_t *pgdat,
1053                                                         struct zone *zone)
1054 {
1055         struct page *page;
1056         struct page_ext *page_ext;
1057         unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
1058         unsigned long end_pfn = pfn + zone->spanned_pages;
1059         unsigned long count[MIGRATE_TYPES] = { 0, };
1060         int pageblock_mt, page_mt;
1061         int i;
1062
1063         /* Scan block by block. First and last block may be incomplete */
1064         pfn = zone->zone_start_pfn;
1065
1066         /*
1067          * Walk the zone in pageblock_nr_pages steps. If a page block spans
1068          * a zone boundary, it will be double counted between zones. This does
1069          * not matter as the mixed block count will still be correct
1070          */
1071         for (; pfn < end_pfn; ) {
1072                 if (!pfn_valid(pfn)) {
1073                         pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
1074                         continue;
1075                 }
1076
1077                 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
1078                 block_end_pfn = min(block_end_pfn, end_pfn);
1079
1080                 page = pfn_to_page(pfn);
1081                 pageblock_mt = get_pfnblock_migratetype(page, pfn);
1082
1083                 for (; pfn < block_end_pfn; pfn++) {
1084                         if (!pfn_valid_within(pfn))
1085                                 continue;
1086
1087                         page = pfn_to_page(pfn);
1088                         if (PageBuddy(page)) {
1089                                 pfn += (1UL << page_order(page)) - 1;
1090                                 continue;
1091                         }
1092
1093                         if (PageReserved(page))
1094                                 continue;
1095
1096                         page_ext = lookup_page_ext(page);
1097
1098                         if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
1099                                 continue;
1100
1101                         page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
1102                         if (pageblock_mt != page_mt) {
1103                                 if (is_migrate_cma(pageblock_mt))
1104                                         count[MIGRATE_MOVABLE]++;
1105                                 else
1106                                         count[pageblock_mt]++;
1107
1108                                 pfn = block_end_pfn;
1109                                 break;
1110                         }
1111                         pfn += (1UL << page_ext->order) - 1;
1112                 }
1113         }
1114
1115         /* Print counts */
1116         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1117         for (i = 0; i < MIGRATE_TYPES; i++)
1118                 seq_printf(m, "%12lu ", count[i]);
1119         seq_putc(m, '\n');
1120 }
1121 #endif /* CONFIG_PAGE_OWNER */
1122
1123 /*
1124  * Print out the number of pageblocks for each migratetype that contain pages
1125  * of other types. This gives an indication of how well fallbacks are being
1126  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1127  * to determine what is going on
1128  */
1129 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1130 {
1131 #ifdef CONFIG_PAGE_OWNER
1132         int mtype;
1133
1134         if (!page_owner_inited)
1135                 return;
1136
1137         drain_all_pages(NULL);
1138
1139         seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1140         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1141                 seq_printf(m, "%12s ", migratetype_names[mtype]);
1142         seq_putc(m, '\n');
1143
1144         walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1145 #endif /* CONFIG_PAGE_OWNER */
1146 }
1147
1148 /*
1149  * This prints out statistics in relation to grouping pages by mobility.
1150  * It is expensive to collect so do not constantly read the file.
1151  */
1152 static int pagetypeinfo_show(struct seq_file *m, void *arg)
1153 {
1154         pg_data_t *pgdat = (pg_data_t *)arg;
1155
1156         /* check memoryless node */
1157         if (!node_state(pgdat->node_id, N_MEMORY))
1158                 return 0;
1159
1160         seq_printf(m, "Page block order: %d\n", pageblock_order);
1161         seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1162         seq_putc(m, '\n');
1163         pagetypeinfo_showfree(m, pgdat);
1164         pagetypeinfo_showblockcount(m, pgdat);
1165         pagetypeinfo_showmixedcount(m, pgdat);
1166
1167         return 0;
1168 }
1169
1170 static const struct seq_operations fragmentation_op = {
1171         .start  = frag_start,
1172         .next   = frag_next,
1173         .stop   = frag_stop,
1174         .show   = frag_show,
1175 };
1176
1177 static int fragmentation_open(struct inode *inode, struct file *file)
1178 {
1179         return seq_open(file, &fragmentation_op);
1180 }
1181
1182 static const struct file_operations fragmentation_file_operations = {
1183         .open           = fragmentation_open,
1184         .read           = seq_read,
1185         .llseek         = seq_lseek,
1186         .release        = seq_release,
1187 };
1188
1189 static const struct seq_operations pagetypeinfo_op = {
1190         .start  = frag_start,
1191         .next   = frag_next,
1192         .stop   = frag_stop,
1193         .show   = pagetypeinfo_show,
1194 };
1195
1196 static int pagetypeinfo_open(struct inode *inode, struct file *file)
1197 {
1198         return seq_open(file, &pagetypeinfo_op);
1199 }
1200
1201 static const struct file_operations pagetypeinfo_file_ops = {
1202         .open           = pagetypeinfo_open,
1203         .read           = seq_read,
1204         .llseek         = seq_lseek,
1205         .release        = seq_release,
1206 };
1207
1208 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1209                                                         struct zone *zone)
1210 {
1211         int i;
1212         seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1213         seq_printf(m,
1214                    "\n  pages free     %lu"
1215                    "\n        min      %lu"
1216                    "\n        low      %lu"
1217                    "\n        high     %lu"
1218                    "\n        scanned  %lu"
1219                    "\n        spanned  %lu"
1220                    "\n        present  %lu"
1221                    "\n        managed  %lu",
1222                    zone_page_state(zone, NR_FREE_PAGES),
1223                    min_wmark_pages(zone),
1224                    low_wmark_pages(zone),
1225                    high_wmark_pages(zone),
1226                    zone_page_state(zone, NR_PAGES_SCANNED),
1227                    zone->spanned_pages,
1228                    zone->present_pages,
1229                    zone->managed_pages);
1230
1231         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1232                 seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
1233                                 zone_page_state(zone, i));
1234
1235         seq_printf(m,
1236                    "\n        protection: (%ld",
1237                    zone->lowmem_reserve[0]);
1238         for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1239                 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1240         seq_printf(m,
1241                    ")"
1242                    "\n  pagesets");
1243         for_each_online_cpu(i) {
1244                 struct per_cpu_pageset *pageset;
1245
1246                 pageset = per_cpu_ptr(zone->pageset, i);
1247                 seq_printf(m,
1248                            "\n    cpu: %i"
1249                            "\n              count: %i"
1250                            "\n              high:  %i"
1251                            "\n              batch: %i",
1252                            i,
1253                            pageset->pcp.count,
1254                            pageset->pcp.high,
1255                            pageset->pcp.batch);
1256 #ifdef CONFIG_SMP
1257                 seq_printf(m, "\n  vm stats threshold: %d",
1258                                 pageset->stat_threshold);
1259 #endif
1260         }
1261         seq_printf(m,
1262                    "\n  all_unreclaimable: %u"
1263                    "\n  start_pfn:         %lu"
1264                    "\n  inactive_ratio:    %u",
1265                    !zone_reclaimable(zone),
1266                    zone->zone_start_pfn,
1267                    zone->inactive_ratio);
1268         seq_putc(m, '\n');
1269 }
1270
1271 /*
1272  * Output information about zones in @pgdat.
1273  */
1274 static int zoneinfo_show(struct seq_file *m, void *arg)
1275 {
1276         pg_data_t *pgdat = (pg_data_t *)arg;
1277         walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1278         return 0;
1279 }
1280
1281 static const struct seq_operations zoneinfo_op = {
1282         .start  = frag_start, /* iterate over all zones. The same as in
1283                                * fragmentation. */
1284         .next   = frag_next,
1285         .stop   = frag_stop,
1286         .show   = zoneinfo_show,
1287 };
1288
1289 static int zoneinfo_open(struct inode *inode, struct file *file)
1290 {
1291         return seq_open(file, &zoneinfo_op);
1292 }
1293
1294 static const struct file_operations proc_zoneinfo_file_operations = {
1295         .open           = zoneinfo_open,
1296         .read           = seq_read,
1297         .llseek         = seq_lseek,
1298         .release        = seq_release,
1299 };
1300
1301 enum writeback_stat_item {
1302         NR_DIRTY_THRESHOLD,
1303         NR_DIRTY_BG_THRESHOLD,
1304         NR_VM_WRITEBACK_STAT_ITEMS,
1305 };
1306
1307 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1308 {
1309         unsigned long *v;
1310         int i, stat_items_size;
1311
1312         if (*pos >= ARRAY_SIZE(vmstat_text))
1313                 return NULL;
1314         stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1315                           NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1316
1317 #ifdef CONFIG_VM_EVENT_COUNTERS
1318         stat_items_size += sizeof(struct vm_event_state);
1319 #endif
1320
1321         v = kmalloc(stat_items_size, GFP_KERNEL);
1322         m->private = v;
1323         if (!v)
1324                 return ERR_PTR(-ENOMEM);
1325         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1326                 v[i] = global_page_state(i);
1327         v += NR_VM_ZONE_STAT_ITEMS;
1328
1329         global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1330                             v + NR_DIRTY_THRESHOLD);
1331         v += NR_VM_WRITEBACK_STAT_ITEMS;
1332
1333 #ifdef CONFIG_VM_EVENT_COUNTERS
1334         all_vm_events(v);
1335         v[PGPGIN] /= 2;         /* sectors -> kbytes */
1336         v[PGPGOUT] /= 2;
1337 #endif
1338         return (unsigned long *)m->private + *pos;
1339 }
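/*
 * Layout note (derived from the code above): m->private is one flat
 * array of unsigned longs - the NR_VM_ZONE_STAT_ITEMS zone counters,
 * then the two writeback thresholds, then (if configured) the vm
 * event counters. vmstat_show() simply indexes that array with *pos,
 * which is why vmstat_text[] must list the names in exactly this
 * order.
 */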
1340
1341 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1342 {
1343         (*pos)++;
1344         if (*pos >= ARRAY_SIZE(vmstat_text))
1345                 return NULL;
1346         return (unsigned long *)m->private + *pos;
1347 }
1348
1349 static int vmstat_show(struct seq_file *m, void *arg)
1350 {
1351         unsigned long *l = arg;
1352         unsigned long off = l - (unsigned long *)m->private;
1353
1354         seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1355         return 0;
1356 }
1357
1358 static void vmstat_stop(struct seq_file *m, void *arg)
1359 {
1360         kfree(m->private);
1361         m->private = NULL;
1362 }
1363
1364 static const struct seq_operations vmstat_op = {
1365         .start  = vmstat_start,
1366         .next   = vmstat_next,
1367         .stop   = vmstat_stop,
1368         .show   = vmstat_show,
1369 };
1370
1371 static int vmstat_open(struct inode *inode, struct file *file)
1372 {
1373         return seq_open(file, &vmstat_op);
1374 }
1375
1376 static const struct file_operations proc_vmstat_file_operations = {
1377         .open           = vmstat_open,
1378         .read           = seq_read,
1379         .llseek         = seq_lseek,
1380         .release        = seq_release,
1381 };
1382 #endif /* CONFIG_PROC_FS */
1383
1384 #ifdef CONFIG_SMP
1385 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1386 int sysctl_stat_interval __read_mostly = HZ;
1387 static cpumask_var_t cpu_stat_off;
1388
1389 static void vmstat_update(struct work_struct *w)
1390 {
1391         if (refresh_cpu_vm_stats()) {
1392                 /*
1393                  * Counters were updated so we expect more updates
1394                  * to occur in the future. Keep on running the
1395                  * update worker thread.
1396                  */
1397                 schedule_delayed_work_on(smp_processor_id(),
1398                         this_cpu_ptr(&vmstat_work),
1399                         round_jiffies_relative(sysctl_stat_interval));
1400         } else {
1401                 /*
1402                  * We did not update any counters so the app may be in
1403                  * a mode where it does not cause counter updates.
1404                  * We may be uselessly running vmstat_update.
1405                  * Defer the checking for differentials to the
1406                  * shepherd thread on a different processor.
1407                  */
1408                 int r;
1409                 /*
1410                  * The shepherd work thread does not race since it never
1411                  * changes the bit if it is zero, but the cpu
1412                  * online / offline code may race if
1413                  * worker threads are still allowed during
1414                  * shutdown / startup.
1415                  */
1416                 r = cpumask_test_and_set_cpu(smp_processor_id(),
1417                         cpu_stat_off);
1418                 VM_BUG_ON(r);
1419         }
1420 }
1421
1422 /*
1423  * Check if the diffs for a certain cpu indicate that
1424  * an update is needed.
1425  */
1426 static bool need_update(int cpu)
1427 {
1428         struct zone *zone;
1429
1430         for_each_populated_zone(zone) {
1431                 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1432
1433                 BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1434                 /*
1435                  * The fast way of checking if there are any vmstat diffs.
1436                  * This works because the diffs are byte sized items.
1437                  */
1438                 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1439                         return true;
1440
1441         }
1442         return false;
1443 }
1444
1445
1446 /*
1447  * Shepherd worker thread that checks the
1448  * differentials of processors that have their worker
1449  * threads for vm statistics updates disabled because of
1450  * inactivity.
1451  */
1452 static void vmstat_shepherd(struct work_struct *w);
1453
1454 static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);
1455
1456 static void vmstat_shepherd(struct work_struct *w)
1457 {
1458         int cpu;
1459
1460         get_online_cpus();
1461         /* Check processors whose vmstat worker threads have been disabled */
1462         for_each_cpu(cpu, cpu_stat_off)
1463                 if (need_update(cpu) &&
1464                         cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
1465
1466                         schedule_delayed_work_on(cpu,
1467                                 &per_cpu(vmstat_work, cpu), 0);
1468
1469         put_online_cpus();
1470
1471         schedule_delayed_work(&shepherd,
1472                 round_jiffies_relative(sysctl_stat_interval));
1473
1474 }
1475
1476 static void __init start_shepherd_timer(void)
1477 {
1478         int cpu;
1479
1480         for_each_possible_cpu(cpu)
1481                 INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
1482                         vmstat_update);
1483
1484         if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
1485                 BUG();
1486         cpumask_copy(cpu_stat_off, cpu_online_mask);
1487
1488         schedule_delayed_work(&shepherd,
1489                 round_jiffies_relative(sysctl_stat_interval));
1490 }
1491
1492 static void vmstat_cpu_dead(int node)
1493 {
1494         int cpu;
1495
1496         get_online_cpus();
1497         for_each_online_cpu(cpu)
1498                 if (cpu_to_node(cpu) == node)
1499                         goto end;
1500
1501         node_clear_state(node, N_CPU);
1502 end:
1503         put_online_cpus();
1504 }
1505
1506 /*
1507  * Use the cpu notifier to ensure that the thresholds are recalculated
1508  * when necessary.
1509  */
1510 static int vmstat_cpuup_callback(struct notifier_block *nfb,
1511                 unsigned long action,
1512                 void *hcpu)
1513 {
1514         long cpu = (long)hcpu;
1515
1516         switch (action) {
1517         case CPU_ONLINE:
1518         case CPU_ONLINE_FROZEN:
1519                 refresh_zone_stat_thresholds();
1520                 node_set_state(cpu_to_node(cpu), N_CPU);
1521                 cpumask_set_cpu(cpu, cpu_stat_off);
1522                 break;
1523         case CPU_DOWN_PREPARE:
1524         case CPU_DOWN_PREPARE_FROZEN:
1525                 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1526                 cpumask_clear_cpu(cpu, cpu_stat_off);
1527                 break;
1528         case CPU_DOWN_FAILED:
1529         case CPU_DOWN_FAILED_FROZEN:
1530                 cpumask_set_cpu(cpu, cpu_stat_off);
1531                 break;
1532         case CPU_DEAD:
1533         case CPU_DEAD_FROZEN:
1534                 refresh_zone_stat_thresholds();
1535                 vmstat_cpu_dead(cpu_to_node(cpu));
1536                 break;
1537         default:
1538                 break;
1539         }
1540         return NOTIFY_OK;
1541 }
1542
1543 static struct notifier_block vmstat_notifier =
1544         { &vmstat_cpuup_callback, NULL, 0 };
1545 #endif
1546
1547 static int __init setup_vmstat(void)
1548 {
1549 #ifdef CONFIG_SMP
1550         cpu_notifier_register_begin();
1551         __register_cpu_notifier(&vmstat_notifier);
1552
1553         start_shepherd_timer();
1554         cpu_notifier_register_done();
1555 #endif
1556 #ifdef CONFIG_PROC_FS
1557         proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1558         proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1559         proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1560         proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1561 #endif
1562         return 0;
1563 }
1564 module_init(setup_vmstat)
1565
1566 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1567
1568 /*
1569  * Return an index indicating how much of the available free memory is
1570  * unusable for an allocation of the requested size.
1571  */
1572 static int unusable_free_index(unsigned int order,
1573                                 struct contig_page_info *info)
1574 {
1575         /* No free memory is interpreted as all free memory is unusable */
1576         if (info->free_pages == 0)
1577                 return 1000;
1578
1579         /*
1580          * Index should be a value between 0 and 1. Return a value to 3
1581          * decimal places.
1582          *
1583          * 0 => no fragmentation
1584          * 1 => high fragmentation
1585          */
1586         return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1587
1588 }
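/*
 * Illustrative worked example (numbers assumed): with 1000 free pages
 * of which free_blocks_suitable = 25 order-3 blocks (25 << 3 = 200
 * pages) could satisfy an order-3 request, the index is
 * (1000 - 200) * 1000 / 1000 = 800, printed as 0.800, i.e. 80% of the
 * free memory is unusable for that allocation size.
 */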
1589
1590 static void unusable_show_print(struct seq_file *m,
1591                                         pg_data_t *pgdat, struct zone *zone)
1592 {
1593         unsigned int order;
1594         int index;
1595         struct contig_page_info info;
1596
1597         seq_printf(m, "Node %d, zone %8s ",
1598                                 pgdat->node_id,
1599                                 zone->name);
1600         for (order = 0; order < MAX_ORDER; ++order) {
1601                 fill_contig_page_info(zone, order, &info);
1602                 index = unusable_free_index(order, &info);
1603                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1604         }
1605
1606         seq_putc(m, '\n');
1607 }
1608
1609 /*
1610  * Display unusable free space index
1611  *
1612  * The unusable free space index measures how much of the available free
1613  * memory cannot be used to satisfy an allocation of a given size and is a
1614  * value between 0 and 1. The higher the value, the more of free memory is
1615  * unusable and by implication, the worse the external fragmentation is. This
1616  * can be expressed as a percentage by multiplying by 100.
1617  */
1618 static int unusable_show(struct seq_file *m, void *arg)
1619 {
1620         pg_data_t *pgdat = (pg_data_t *)arg;
1621
1622         /* check memoryless node */
1623         if (!node_state(pgdat->node_id, N_MEMORY))
1624                 return 0;
1625
1626         walk_zones_in_node(m, pgdat, unusable_show_print);
1627
1628         return 0;
1629 }
1630
1631 static const struct seq_operations unusable_op = {
1632         .start  = frag_start,
1633         .next   = frag_next,
1634         .stop   = frag_stop,
1635         .show   = unusable_show,
1636 };
1637
1638 static int unusable_open(struct inode *inode, struct file *file)
1639 {
1640         return seq_open(file, &unusable_op);
1641 }
1642
1643 static const struct file_operations unusable_file_ops = {
1644         .open           = unusable_open,
1645         .read           = seq_read,
1646         .llseek         = seq_lseek,
1647         .release        = seq_release,
1648 };
1649
1650 static void extfrag_show_print(struct seq_file *m,
1651                                         pg_data_t *pgdat, struct zone *zone)
1652 {
1653         unsigned int order;
1654         int index;
1655
1656         /* Alloc on stack as interrupts are disabled for zone walk */
1657         struct contig_page_info info;
1658
1659         seq_printf(m, "Node %d, zone %8s ",
1660                                 pgdat->node_id,
1661                                 zone->name);
1662         for (order = 0; order < MAX_ORDER; ++order) {
1663                 fill_contig_page_info(zone, order, &info);
1664                 index = __fragmentation_index(order, &info);
1665                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1666         }
1667
1668         seq_putc(m, '\n');
1669 }
1670
1671 /*
1672  * Display fragmentation index for orders that allocations would fail for
1673  */
1674 static int extfrag_show(struct seq_file *m, void *arg)
1675 {
1676         pg_data_t *pgdat = (pg_data_t *)arg;
1677
1678         walk_zones_in_node(m, pgdat, extfrag_show_print);
1679
1680         return 0;
1681 }
1682
1683 static const struct seq_operations extfrag_op = {
1684         .start  = frag_start,
1685         .next   = frag_next,
1686         .stop   = frag_stop,
1687         .show   = extfrag_show,
1688 };
1689
1690 static int extfrag_open(struct inode *inode, struct file *file)
1691 {
1692         return seq_open(file, &extfrag_op);
1693 }
1694
1695 static const struct file_operations extfrag_file_ops = {
1696         .open           = extfrag_open,
1697         .read           = seq_read,
1698         .llseek         = seq_lseek,
1699         .release        = seq_release,
1700 };
1701
1702 static int __init extfrag_debug_init(void)
1703 {
1704         struct dentry *extfrag_debug_root;
1705
1706         extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1707         if (!extfrag_debug_root)
1708                 return -ENOMEM;
1709
1710         if (!debugfs_create_file("unusable_index", 0444,
1711                         extfrag_debug_root, NULL, &unusable_file_ops))
1712                 goto fail;
1713
1714         if (!debugfs_create_file("extfrag_index", 0444,
1715                         extfrag_debug_root, NULL, &extfrag_file_ops))
1716                 goto fail;
1717
1718         return 0;
1719 fail:
1720         debugfs_remove_recursive(extfrag_debug_root);
1721         return -ENOMEM;
1722 }
1723
1724 module_init(extfrag_debug_init);
1725 #endif