git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff

author     Mel Gorman <mgorman@suse.de>
           Wed, 24 Aug 2011 23:47:03 +0000 (09:47 +1000)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Thu, 22 Sep 2011 08:26:23 +0000 (18:26 +1000)

When direct reclaim encounters a dirty page, it gets recycled around the
LRU for another cycle.  This patch marks the page PageReclaim, similar to
deactivate_page(), so that the page gets reclaimed almost immediately after
the page gets cleaned.  This is to avoid reclaiming clean pages that are
younger than a dirty page encountered at the end of the LRU that might
have been something like a use-once page.
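For reference, the PageReclaim bit set by this patch is consumed when writeback
finishes: end_page_writeback() tests and clears the flag and rotates the
now-clean page to the tail of the inactive LRU, which is what lets it be
reclaimed "almost immediately" once cleaned.  A simplified sketch of that
existing path, assuming the mm/filemap.c of this kernel generation (it is not
part of the diff below):

void end_page_writeback(struct page *page)
{
        /*
         * PageReclaim was set while the page was still dirty at the tail
         * of the LRU; now that writeback is done, rotate the clean page
         * to the tail of the inactive list (via rotate_reclaimable_page()
         * in mm/swap.c) so the next reclaim pass finds it first.
         */
        if (TestClearPageReclaim(page))
                rotate_reclaimable_page(page);

        if (!test_clear_page_writeback(page))
                BUG();

        smp_mb__after_clear_bit();
        wake_up_page(page, PG_writeback);
}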

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <jweiner@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmzone.h
mm/vmscan.c
mm/vmstat.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 43ef561cc525b6754f85808928aa29122642e924..1ed4116bcc3d8faf95885d0d4085126da683bb3d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -100,7 +100,7 @@ enum zone_stat_item {
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_BOUNCE,
        NR_VMSCAN_WRITE,
-       NR_VMSCAN_WRITE_SKIP,
+       NR_VMSCAN_IMMEDIATE,    /* Prioritise for reclaim when writeback ends */
        NR_WRITEBACK_TEMP,      /* Writeback using temporary buffers */
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c3aefa7fb600469c4c4693404517e00e836c3ab0..5bc9eff60d188d929a2cff25e399abef856be609 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -866,7 +866,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                         */
                        if (page_is_file_cache(page) &&
                                        (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
-                               inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP);
+                               /*
+                                * Immediately reclaim when written back.
+                                * Similar in principle to deactivate_page()
+                                * except we already have the page isolated
+                                * and know it's dirty
+                                */
+                               inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
+                               SetPageReclaim(page);
+
                                goto keep_locked;
                        }
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 210bd8ff3a6e03745370253c89605e4b9db1fc51..56e529a40517d115754554191a2ba0ab6535f93c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -702,7 +702,7 @@ const char * const vmstat_text[] = {
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",
-       "nr_vmscan_write_skip",
+       "nr_vmscan_immediate_reclaim",
        "nr_writeback_temp",
        "nr_isolated_anon",
        "nr_isolated_file",