diff --git a/mm/vmstat.c b/mm/vmstat.c
index 20c2ef4458fac9ba3a5af98afd34b135ec86e5cb..9bb314577911f50c06848373d273b3b993373858 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -19,6 +19,9 @@
 #include <linux/math64.h>
 #include <linux/writeback.h>
 #include <linux/compaction.h>
+#include <linux/mm_inline.h>
+
+#include "internal.h"
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -414,12 +417,17 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 EXPORT_SYMBOL(dec_zone_page_state);
 #endif
 
+static inline void fold_diff(int *diff)
+{
+       int i;
+
+       for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+               if (diff[i])
+                       atomic_long_add(diff[i], &vm_stat[i]);
+}
+
 /*
- * Update the zone counters for one cpu.
- *
- * The cpu specified must be either the current cpu or a processor that
- * is not online. If it is the current cpu then the execution thread must
- * be pinned to the current cpu.
+ * Update the zone counters for the current cpu.
  *
  * Note that refresh_cpu_vm_stats strives to only access
  * node local memory. The per cpu pagesets on remote zones are placed
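For context, the per-cpu vm_stat_diff[] deltas that fold_diff() and refresh_cpu_vm_stats() consolidate are produced on the counter update side. A simplified sketch of that path, modelled on __mod_zone_page_state() elsewhere in this file (abbreviated and with a hypothetical name; not the exact kernel code):

/*
 * Sketch: accumulate a delta in the per-cpu diff and spill it into the
 * zone and global counters once the per-cpu threshold is crossed.
 * Like __mod_zone_page_state(), assumed to run with interrupts disabled.
 */
static void sketch_mod_zone_state(struct zone *zone,
				  enum zone_stat_item item, int delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x = delta + __this_cpu_read(*p);
	long t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		/* Spill into the zone and global counters, reset the diff. */
		atomic_long_add(x, &zone->vm_stat[item]);
		atomic_long_add(x, &vm_stat[item]);
		x = 0;
	}
	__this_cpu_write(*p, x);
}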
@@ -432,33 +440,29 @@ EXPORT_SYMBOL(dec_zone_page_state);
  * with the global counters. These could cause remote node cache line
  * bouncing and will have to be only done when necessary.
  */
-void refresh_cpu_vm_stats(int cpu)
+static void refresh_cpu_vm_stats(void)
 {
        struct zone *zone;
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
        for_each_populated_zone(zone) {
-               struct per_cpu_pageset *p;
+               struct per_cpu_pageset __percpu *p = zone->pageset;
 
-               p = per_cpu_ptr(zone->pageset, cpu);
+               for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+                       int v;
 
-               for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-                       if (p->vm_stat_diff[i]) {
-                               unsigned long flags;
-                               int v;
+                       v = this_cpu_xchg(p->vm_stat_diff[i], 0);
+                       if (v) {
 
-                               local_irq_save(flags);
-                               v = p->vm_stat_diff[i];
-                               p->vm_stat_diff[i] = 0;
-                               local_irq_restore(flags);
                                atomic_long_add(v, &zone->vm_stat[i]);
                                global_diff[i] += v;
 #ifdef CONFIG_NUMA
                                /* 3 seconds idle till flush */
-                               p->expire = 3;
+                               __this_cpu_write(p->expire, 3);
 #endif
                        }
+               }
                cond_resched();
 #ifdef CONFIG_NUMA
                /*
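The key change in the hunk above is that this_cpu_xchg() fetches and clears each per-cpu diff in a single per-cpu operation that is safe against interrupts on the local CPU, so the local_irq_save()/local_irq_restore() bracketing of the old read-then-clear sequence can be dropped. A hypothetical helper spelling this out (illustration only, not part of the patch):

/*
 * Hypothetical helper showing what the this_cpu_xchg() above replaces:
 *
 *	local_irq_save(flags);
 *	v = p->vm_stat_diff[i];
 *	p->vm_stat_diff[i] = 0;
 *	local_irq_restore(flags);
 *
 * The xchg form does the same fetch-and-clear in one per-cpu operation
 * that cannot be torn by an interrupt updating the same counter.
 */
static inline int fetch_and_clear_vm_stat_diff(struct per_cpu_pageset __percpu *p,
					       int item)
{
	return this_cpu_xchg(p->vm_stat_diff[item], 0);
}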
@@ -468,29 +472,57 @@ void refresh_cpu_vm_stats(int cpu)
                 * Check if there are pages remaining in this pageset
                 * if not then there is nothing to expire.
                 */
-               if (!p->expire || !p->pcp.count)
+               if (!__this_cpu_read(p->expire) ||
+                              !__this_cpu_read(p->pcp.count))
                        continue;
 
                /*
                 * We never drain zones local to this processor.
                 */
                if (zone_to_nid(zone) == numa_node_id()) {
-                       p->expire = 0;
+                       __this_cpu_write(p->expire, 0);
                        continue;
                }
 
-               p->expire--;
-               if (p->expire)
+
+               if (__this_cpu_dec_return(p->expire))
                        continue;
 
-               if (p->pcp.count)
-                       drain_zone_pages(zone, &p->pcp);
+               if (__this_cpu_read(p->pcp.count))
+                       drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
 #endif
        }
+       fold_diff(global_diff);
+}
 
-       for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-               if (global_diff[i])
-                       atomic_long_add(global_diff[i], &vm_stat[i]);
+/*
+ * Fold the data for an offline cpu into the global array.
+ * There cannot be any access by the offline cpu and therefore
+ * synchronization is simplified.
+ */
+void cpu_vm_stats_fold(int cpu)
+{
+       struct zone *zone;
+       int i;
+       int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
+
+       for_each_populated_zone(zone) {
+               struct per_cpu_pageset *p;
+
+               p = per_cpu_ptr(zone->pageset, cpu);
+
+               for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+                       if (p->vm_stat_diff[i]) {
+                               int v;
+
+                               v = p->vm_stat_diff[i];
+                               p->vm_stat_diff[i] = 0;
+                               atomic_long_add(v, &zone->vm_stat[i]);
+                               global_diff[i] += v;
+                       }
+       }
+
+       fold_diff(global_diff);
 }
 
 /*
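cpu_vm_stats_fold() takes over the offline-CPU case that refresh_cpu_vm_stats(cpu) used to cover. Its caller is not visible in this file; presumably the CPU-hotplug notifier in mm/page_alloc.c is switched over elsewhere in the series. A hedged sketch of such a caller:

/*
 * Sketch (assumption about the caller, which is not shown in this diff):
 * once a CPU is dead, fold its leftover per-cpu deltas exactly once.
 */
static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		lru_add_drain_cpu(cpu);		/* other per-cpu draining */
		drain_pages(cpu);		/* free its pcp pages */
		cpu_vm_stats_fold(cpu);		/* sync its vm_stat_diff[] */
	}
	return NOTIFY_OK;
}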
@@ -703,6 +735,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 const char * const vmstat_text[] = {
        /* Zoned VM counters */
        "nr_free_pages",
+       "nr_alloc_batch",
        "nr_inactive_anon",
        "nr_active_anon",
        "nr_inactive_file",
@@ -817,6 +850,12 @@ const char * const vmstat_text[] = {
        "thp_zero_page_alloc",
        "thp_zero_page_alloc_failed",
 #endif
+#ifdef CONFIG_SMP
+       "nr_tlb_remote_flush",
+       "nr_tlb_remote_flush_received",
+#endif
+       "nr_tlb_local_flush_all",
+       "nr_tlb_local_flush_one",
 
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 };
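The new strings are the /proc/vmstat labels for TLB-flush event counters, presumably NR_TLB_REMOTE_FLUSH and friends in enum vm_event_item, added elsewhere in the series. The counters themselves would be bumped from the architecture's flush paths, roughly along these lines (assumption, illustration only):

/* Sketch (assumption): an x86 TLB-flush path accounting the new event. */
static void sketch_local_flush_tlb_all(void)
{
	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();	/* the actual flush */
}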
@@ -1052,7 +1091,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n  all_unreclaimable: %u"
                   "\n  start_pfn:         %lu"
                   "\n  inactive_ratio:    %u",
-                  zone->all_unreclaimable,
+                  !zone_reclaimable(zone),
                   zone->zone_start_pfn,
                   zone->inactive_ratio);
        seq_putc(m, '\n');
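zone->all_unreclaimable goes away in this series; reclaimability is now derived from the scan counters via zone_reclaimable(), which lives in mm/vmscan.c and is declared in mm/internal.h (hence the new #include "internal.h" at the top of this diff). Its definition is roughly:

/* Roughly (mm/vmscan.c, quoted from memory; details may differ): */
bool zone_reclaimable(struct zone *zone)
{
	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
}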
@@ -1177,7 +1216,7 @@ int sysctl_stat_interval __read_mostly = HZ;
 
 static void vmstat_update(struct work_struct *w)
 {
-       refresh_cpu_vm_stats(smp_processor_id());
+       refresh_cpu_vm_stats();
        schedule_delayed_work(&__get_cpu_var(vmstat_work),
                round_jiffies_relative(sysctl_stat_interval));
 }
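Since refresh_cpu_vm_stats() now implicitly acts on whichever CPU the work item runs on, it matters that vmstat_work is per-cpu and queued on each CPU. The per-cpu deferrable work is set up along these lines (a sketch based on start_cpu_timer() in this file, not verbatim):

/* Sketch of how the per-cpu vmstat work is started. */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *work = &per_cpu(vmstat_work, cpu);

	INIT_DEFERRABLE_WORK(work, vmstat_update);
	schedule_delayed_work_on(cpu, work,
				 __round_jiffies_relative(HZ, cpu));
}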