vmscan: bail out of direct reclaim after swap_cluster_max pages
index 62e7f62fb559c7a09b5b15b4d7a0b3a7b5b4c5e5..5faa7739487f9e9da92f1e8c3a59f00cac1deb32 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -52,6 +52,9 @@ struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
 
+       /* Number of pages freed so far during a call to shrink_zones() */
+       unsigned long nr_reclaimed;
+
        /* This context's GFP mask */
        gfp_t gfp_mask;
 
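This new field is the backbone of the whole diff: instead of each reclaim function returning a freed-page count for callers to sum, the running total now lives in the scan_control that is already threaded through every reclaim path, which is what lets shrink_zone() bail out early further down. A minimal sketch of the pattern (illustrative only, not kernel code):

	struct scan_control_sketch {
		unsigned long nr_scanned;
		unsigned long nr_reclaimed;	/* shared running total */
	};

	/* Helpers accumulate into the control struct instead of returning. */
	static void shrink_something(struct scan_control_sketch *sc,
				     unsigned long freed)
	{
		sc->nr_reclaimed += freed;
	}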
@@ -617,7 +620,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                        referenced && page_mapping_inuse(page))
                        goto activate_locked;
 
-#ifdef CONFIG_SWAP
                /*
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
@@ -625,20 +627,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (PageAnon(page) && !PageSwapCache(page)) {
                        if (!(sc->gfp_mask & __GFP_IO))
                                goto keep_locked;
-                       switch (try_to_munlock(page)) {
-                       case SWAP_FAIL:         /* shouldn't happen */
-                       case SWAP_AGAIN:
-                               goto keep_locked;
-                       case SWAP_MLOCK:
-                               goto cull_mlocked;
-                       case SWAP_SUCCESS:
-                               ; /* fall thru'; add to swap cache */
-                       }
-                       if (!add_to_swap(page, GFP_ATOMIC))
+                       if (!add_to_swap(page))
                                goto activate_locked;
                        may_enter_fs = 1;
                }
-#endif /* CONFIG_SWAP */
 
                mapping = page_mapping(page);
 
@@ -752,6 +744,8 @@ free_it:
                continue;
 
 cull_mlocked:
+               if (PageSwapCache(page))
+                       try_to_free_swap(page);
                unlock_page(page);
                putback_lru_page(page);
                continue;
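A consequence of the change above: since swap space is now allocated before mlock is detected, a page arriving at cull_mlocked may already hold a swap cache entry, and the added PageSwapCache()/try_to_free_swap() pair releases that slot before the page is parked on the unevictable list. The unwind path, annotated:

	cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);	/* drop the now-useless swap slot */
		unlock_page(page);
		putback_lru_page(page);		/* lands on the unevictable list */
		continue;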
@@ -759,7 +753,7 @@ cull_mlocked:
 activate_locked:
                /* Not a candidate for swapping, so reclaim swap space. */
                if (PageSwapCache(page) && vm_swap_full())
-                       remove_exclusive_swap_page_ref(page);
+                       try_to_free_swap(page);
                VM_BUG_ON(PageActive(page));
                SetPageActive(page);
                pgactivate++;
@@ -1255,7 +1249,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * This helps balance scan pressure between file and anonymous
         * pages in get_scan_ratio.
         */
-       zone->recent_rotated[!!file] += pgmoved;
+       if (scan_global_lru(sc))
+               zone->recent_rotated[!!file] += pgmoved;
 
        /*
         * Move the pages to the [file or anon] inactive list.
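The scan_global_lru() test added above presumably exists so that memory-cgroup reclaim does not skew the zone-wide rotation statistics: recent_rotated[] feeds the anon/file balancing in get_scan_ratio(), which reasons about the whole zone. In outline:

	if (scan_global_lru(sc))			/* global reclaim only */
		zone->recent_rotated[!!file] += pgmoved;
	/* memcg-limit reclaim: leave the zone-wide statistics untouched */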
@@ -1336,12 +1331,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        unsigned long anon_prio, file_prio;
        unsigned long ap, fp;
 
-       anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
-               zone_page_state(zone, NR_INACTIVE_ANON);
-       file  = zone_page_state(zone, NR_ACTIVE_FILE) +
-               zone_page_state(zone, NR_INACTIVE_FILE);
-       free  = zone_page_state(zone, NR_FREE_PAGES);
-
        /* If we have no swap space, do not bother scanning anon pages. */
        if (nr_swap_pages <= 0) {
                percent[0] = 0;
@@ -1349,6 +1338,12 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
                return;
        }
 
+       anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
+               zone_page_state(zone, NR_INACTIVE_ANON);
+       file  = zone_page_state(zone, NR_ACTIVE_FILE) +
+               zone_page_state(zone, NR_INACTIVE_FILE);
+       free  = zone_page_state(zone, NR_FREE_PAGES);
+
        /* If we have very few page cache pages, force-scan anon pages. */
        if (unlikely(file + free <= zone->pages_high)) {
                percent[0] = 100;
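Reordering get_scan_ratio() above is a micro-optimization: when the system has no swap, the function now takes its early return before reading five per-zone counters whose values it would never use. The resulting flow:

	if (nr_swap_pages <= 0) {	/* no swap: anon pages cannot be reclaimed */
		percent[0] = 0;		/* scan 0% anon...  */
		percent[1] = 100;	/* ...and 100% file */
		return;			/* the counter reads below are skipped */
	}
	/* anon/file/free are read via zone_page_state() only past this point */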
@@ -1408,12 +1403,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static unsigned long shrink_zone(int priority, struct zone *zone,
+static void shrink_zone(int priority, struct zone *zone,
                                struct scan_control *sc)
 {
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
-       unsigned long nr_reclaimed = 0;
        unsigned long percent[2];       /* anon @ 0; file @ 1 */
        enum lru_list l;
 
@@ -1454,10 +1448,21 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                                        (unsigned long)sc->swap_cluster_max);
                                nr[l] -= nr_to_scan;
 
-                               nr_reclaimed += shrink_list(l, nr_to_scan,
+                               sc->nr_reclaimed += shrink_list(l, nr_to_scan,
                                                        zone, sc, priority);
                        }
                }
+               /*
+                * On large memory systems, scan >> priority can become
+                * really large. This is fine for the starting priority;
+                * we want to put equal scanning pressure on each zone.
+                * However, if the VM has a harder time of freeing pages,
+                * with multiple processes reclaiming pages, the total
+                * freeing target can get unreasonably large.
+                */
+               if (sc->nr_reclaimed > sc->swap_cluster_max &&
+                       priority < DEF_PRIORITY && !current_is_kswapd())
+                       break;
        }
 
        /*
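The break added above is the patch's namesake. To put numbers on the comment: each pass scans roughly lru_size >> priority pages per list, so on big zones the low-priority retry passes develop enormous targets. A worked example under stated (hypothetical) assumptions:

	/* Worked example (hypothetical 4,000,000-page LRU, DEF_PRIORITY == 12):
	 *
	 *	4000000 >> 12  ==     976  pages scanned at the starting priority
	 *	4000000 >>  6  ==   62500  pages by priority 6
	 *	4000000 >>  2  == 1000000  pages by priority 2
	 *
	 * Several direct reclaimers at low priority would each chase targets
	 * of this size; the bail-out stops each of them after
	 * swap_cluster_max (normally SWAP_CLUSTER_MAX == 32) pages once past
	 * the first pass, while kswapd (current_is_kswapd()) keeps going to
	 * do the background work.
	 */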
@@ -1470,7 +1475,6 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
        throttle_vm_writeout(sc->gfp_mask);
-       return nr_reclaimed;
 }
 
 /*
@@ -1484,16 +1488,13 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
  * b) The zones may be over pages_high but they must go *over* pages_high to
  *    satisfy the `incremental min' zone defense algorithm.
  *
- * Returns the number of reclaimed pages.
- *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
-       unsigned long nr_reclaimed = 0;
        struct zoneref *z;
        struct zone *zone;
 
@@ -1524,10 +1525,8 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
                                                        priority);
                }
 
-               nr_reclaimed += shrink_zone(priority, zone, sc);
+               shrink_zone(priority, zone, sc);
        }
-
-       return nr_reclaimed;
 }
 
 /*
@@ -1552,7 +1551,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        int priority;
        unsigned long ret = 0;
        unsigned long total_scanned = 0;
-       unsigned long nr_reclaimed = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long lru_pages = 0;
        struct zoneref *z;
@@ -1580,7 +1578,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               nr_reclaimed += shrink_zones(priority, zonelist, sc);
+               shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
@@ -1588,13 +1586,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                if (scan_global_lru(sc)) {
                        shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
                        if (reclaim_state) {
-                               nr_reclaimed += reclaim_state->reclaimed_slab;
+                               sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                                reclaim_state->reclaimed_slab = 0;
                        }
                }
                total_scanned += sc->nr_scanned;
-               if (nr_reclaimed >= sc->swap_cluster_max) {
-                       ret = nr_reclaimed;
+               if (sc->nr_reclaimed >= sc->swap_cluster_max) {
+                       ret = sc->nr_reclaimed;
                        goto out;
                }
 
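The caller side of the new accounting: do_try_to_free_pages() already stopped its priority loop once a cluster's worth of pages had been freed; it now just reads the total out of sc->nr_reclaimed instead of a local variable. Compressed sketch of the loop (not the full function):

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc->nr_scanned = 0;
		shrink_zones(priority, zonelist, sc);	/* grows sc->nr_reclaimed */
		if (sc->nr_reclaimed >= sc->swap_cluster_max)
			return sc->nr_reclaimed;	/* freed enough; stop early */
	}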
@@ -1617,7 +1615,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        }
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (!sc->all_unreclaimable && scan_global_lru(sc))
-               ret = nr_reclaimed;
+               ret = sc->nr_reclaimed;
 out:
        /*
         * Now that we've scanned all the zones at this priority level, note
@@ -1712,7 +1710,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        int priority;
        int i;
        unsigned long total_scanned;
-       unsigned long nr_reclaimed;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
@@ -1731,7 +1728,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 
 loop_again:
        total_scanned = 0;
-       nr_reclaimed = 0;
+       sc.nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);
 
@@ -1817,11 +1814,11 @@ loop_again:
                         */
                        if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
                                                end_zone, 0))
-                               nr_reclaimed += shrink_zone(priority, zone, &sc);
+                               shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                lru_pages);
-                       nr_reclaimed += reclaim_state->reclaimed_slab;
+                       sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
                        if (zone_is_all_unreclaimable(zone))
                                continue;
@@ -1835,7 +1832,7 @@ loop_again:
                         * even in laptop mode
                         */
                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
-                           total_scanned > nr_reclaimed + nr_reclaimed / 2)
+                           total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
                }
                if (all_zones_ok)
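The heuristic above (unchanged except for the counter's new home) turns on writeback once kswapd has scanned half again as many pages as it has reclaimed. With hypothetical numbers:

	/* Scanning outpaces reclaim by more than 1.5x, so writeback is enabled:
	 *
	 *	total_scanned   == 300	(also > SWAP_CLUSTER_MAX * 2 == 64)
	 *	sc.nr_reclaimed == 150
	 *	300 > 150 + 150/2 == 225  ==>  sc.may_writepage = 1
	 */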
@@ -1853,7 +1850,7 @@ loop_again:
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
-               if (nr_reclaimed >= SWAP_CLUSTER_MAX)
+               if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
        }
 out:
@@ -1875,7 +1872,7 @@ out:
                goto loop_again;
        }
 
-       return nr_reclaimed;
+       return sc.nr_reclaimed;
 }
 
 /*
@@ -1902,7 +1899,7 @@ static int kswapd(void *p)
        };
        node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-       if (!cpus_empty(*cpumask))
+       if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(tsk, cpumask);
        current->reclaim_state = &reclaim_state;
 
@@ -2141,7 +2138,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
                        pg_data_t *pgdat = NODE_DATA(nid);
                        node_to_cpumask_ptr(mask, pgdat->node_id);
 
-                       if (any_online_cpu(*mask) < nr_cpu_ids)
+                       if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
                                set_cpus_allowed_ptr(pgdat->kswapd, mask);
                }
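The two cpumask hunks (this one and the one in kswapd() above) belong to the tree-wide migration from by-value cpumask_t operations to pointer-based struct cpumask helpers, which avoids copying large NR_CPUS masks onto the stack. The correspondence as used here:

	/*
	 *   old (cpumask_t by value)       new (const struct cpumask *)
	 *   -------------------------      --------------------------------------
	 *   cpus_empty(*cpumask)           cpumask_empty(cpumask)
	 *   any_online_cpu(*mask)          cpumask_any_and(cpu_online_mask, mask)
	 *
	 * cpumask_any_and() returns >= nr_cpu_ids when the intersection is
	 * empty, which matches the check against nr_cpu_ids above.
	 */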
@@ -2227,7 +2224,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        struct task_struct *p = current;
        struct reclaim_state reclaim_state;
        int priority;
-       unsigned long nr_reclaimed = 0;
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -2260,9 +2256,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                priority = ZONE_RECLAIM_PRIORITY;
                do {
                        note_zone_scanning_priority(zone, priority);
-                       nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       shrink_zone(priority, zone, &sc);
                        priority--;
-               } while (priority >= 0 && nr_reclaimed < nr_pages);
+               } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
        }
 
        slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
@@ -2286,13 +2282,13 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * Update nr_reclaimed by the number of slab pages we
                 * reclaimed from this zone.
                 */
-               nr_reclaimed += slab_reclaimable -
+               sc.nr_reclaimed += slab_reclaimable -
                        zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        }
 
        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
-       return nr_reclaimed >= nr_pages;
+       return sc.nr_reclaimed >= nr_pages;
 }
 
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
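A subtlety in the __zone_reclaim() hunk above: shrink_slab() reports no zone-local count, so the function infers how many slab pages it freed from the drop in the zone's NR_SLAB_RECLAIMABLE counter. The accounting in isolation (names from this patch):

	unsigned long before = zone_page_state(zone, NR_SLAB_RECLAIMABLE);

	/* ... the shrink_slab() loop runs here ... */

	/* credit the zone-local drop to this reclaim attempt */
	sc.nr_reclaimed += before - zone_page_state(zone, NR_SLAB_RECLAIMABLE);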
@@ -2472,7 +2468,7 @@ void scan_mapping_unevictable_pages(struct address_space *mapping)
  * back onto @zone's unevictable list.
  */
 #define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
-void scan_zone_unevictable_pages(struct zone *zone)
+static void scan_zone_unevictable_pages(struct zone *zone)
 {
        struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
        unsigned long scan;
@@ -2514,7 +2510,7 @@ void scan_zone_unevictable_pages(struct zone *zone)
  * that has possibly/probably made some previously unevictable pages
  * evictable.
  */
-void scan_all_zones_unevictable_pages(void)
+static void scan_all_zones_unevictable_pages(void)
 {
        struct zone *zone;