git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - mm/page_alloc.c
mm: vmstat: move slab statistics from zone to node counters
[karo-tx-linux.git] / mm / page_alloc.c
index 8aa860017d6679d7aa740656ae489a5b42c901f7..a35add8d7c0b77c99be628063597f83d75de5567 100644 (file)
@@ -4643,8 +4643,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                        " present:%lukB"
                        " managed:%lukB"
                        " mlocked:%lukB"
-                       " slab_reclaimable:%lukB"
-                       " slab_unreclaimable:%lukB"
                        " kernel_stack:%lukB"
                        " pagetables:%lukB"
                        " bounce:%lukB"
@@ -4666,8 +4664,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                        K(zone->present_pages),
                        K(zone->managed_pages),
                        K(zone_page_state(zone, NR_MLOCK)),
-                       K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
-                       K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
                        zone_page_state(zone, NR_KERNEL_STACK_KB),
                        K(zone_page_state(zone, NR_PAGETABLE)),
                        K(zone_page_state(zone, NR_BOUNCE)),
@@ -5153,6 +5149,7 @@ static void build_zonelists(pg_data_t *pgdat)
  */
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
 static void setup_zone_pageset(struct zone *zone);
 
 /*
@@ -6053,6 +6050,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
        spin_lock_init(&pgdat->lru_lock);
        lruvec_init(node_lruvec(pgdat));
 
+       pgdat->per_cpu_nodestats = &boot_nodestats;
+
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
                unsigned long size, realsize, freesize, memmap_pages;