index 5dabf444d724db98595567b0f7daed7d53fc877e..98d08b4579faa42b8ef43ba4563ed6cb76f19893 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -46,7 +46,7 @@ int page_cluster;
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 #endif
@@ -97,6 +97,16 @@ static void __put_compound_page(struct page *page)
 
 void __put_page(struct page *page)
 {
+       if (is_zone_device_page(page)) {
+               put_dev_pagemap(page->pgmap);
+
+               /*
+                * The page belongs to the device that created pgmap. Do
+                * not return it to the page allocator.
+                */
+               return;
+       }
+
        if (unlikely(PageCompound(page)))
                __put_compound_page(page);
        else
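
The new branch above handles ZONE_DEVICE memory: such a struct page is owned by the dev_pagemap that created it, so when the last reference goes away the pagemap is notified via put_dev_pagemap() and the page is deliberately kept away from the buddy allocator. A minimal userspace sketch of that ownership split follows; the toy_* types and helpers are illustrative stand-ins, not kernel API, and the model folds put_page() and __put_page() into one function.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct page, with only the fields the sketch needs. */
struct toy_page {
	int refcount;
	bool is_zone_device;	/* models is_zone_device_page() */
	void *pgmap;		/* models page->pgmap */
};

static void toy_put_dev_pagemap(void *pgmap)
{
	/* The device that created the pagemap keeps the backing memory. */
	printf("page handed back to device pagemap %p\n", pgmap);
}

/* Models put_page() with __put_page() folded in. */
static void toy_put_page(struct toy_page *page)
{
	if (--page->refcount > 0)
		return;

	if (page->is_zone_device) {
		toy_put_dev_pagemap(page->pgmap);
		return;		/* never reaches the general allocator */
	}

	free(page);		/* models returning to the page allocator */
}

int main(void)
{
	struct toy_page dev = {
		.refcount = 1, .is_zone_device = true, .pgmap = (void *)&dev,
	};
	struct toy_page *ord = calloc(1, sizeof(*ord));

	ord->refcount = 1;

	toy_put_page(&dev);	/* device page: stays with its pagemap */
	toy_put_page(ord);	/* ordinary page: freed normally */
	return 0;
}
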
@@ -561,20 +571,27 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 }
 
 
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
                            void *arg)
 {
-       if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-               int file = page_is_file_cache(page);
-               int lru = page_lru_base_type(page);
+       if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+           !PageUnevictable(page)) {
+               bool active = PageActive(page);
 
-               del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
+               del_page_from_lru_list(page, lruvec,
+                                      LRU_INACTIVE_ANON + active);
                ClearPageActive(page);
                ClearPageReferenced(page);
-               add_page_to_lru_list(page, lruvec, lru);
+               /*
+                * Lazyfree pages are clean anonymous pages. They have
+                * the SwapBacked flag cleared, to distinguish them
+                * from normal anonymous pages.
+                */
+               ClearPageSwapBacked(page);
+               add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 
-               __count_vm_event(PGDEACTIVATE);
-               update_page_reclaim_stat(lruvec, file, 0);
+               __count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
+               update_page_reclaim_stat(lruvec, 1, 0);
        }
 }
 
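lru_lazyfree_fn() is the pagevec handler behind madvise(MADV_FREE): a clean anonymous page loses PG_swapbacked and is parked on the inactive file LRU, where reclaim may simply drop it instead of writing it to swap. The PGLAZYFREE event (scaled by hpage_nr_pages() for THP) counts these moves and appears as "pglazyfree" in /proc/vmstat on kernels that carry this series. A small userspace demonstration, assuming Linux 4.5+ for MADV_FREE and a kernel exposing that counter:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Read the pglazyfree event counter from /proc/vmstat, or -1. */
static long read_pglazyfree(void)
{
	char line[128];
	long val = -1;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "pglazyfree %ld", &val) == 1)
			break;
	fclose(f);
	return val;
}

int main(void)
{
	size_t len = 16 * 1024 * 1024;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0xaa, len);		/* dirty the anonymous pages */

	printf("pglazyfree before: %ld\n", read_pglazyfree());

	/*
	 * Mark the range lazyfree: the pages stay mapped, but reclaim
	 * may now discard them instead of swapping them out. Writing
	 * to a page again takes it back out of the lazyfree state.
	 */
	if (madvise(buf, len, MADV_FREE))
		return 1;

	printf("pglazyfree after:  %ld\n", read_pglazyfree());
	return 0;
}
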
@@ -604,9 +621,9 @@ void lru_add_drain_cpu(int cpu)
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 
-       pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+       pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
        if (pagevec_count(pvec))
-               pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+               pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
        activate_page_drain(cpu);
 }
@@ -638,22 +655,22 @@ void deactivate_file_page(struct page *page)
 }
 
 /**
- * deactivate_page - deactivate a page
+ * mark_page_lazyfree - make an anon page lazyfree
- * @page: page to deactivate
+ * @page: page to mark lazyfree
  *
- * deactivate_page() moves @page to the inactive list if @page was on the active
- * list and was not an unevictable page.  This is done to accelerate the reclaim
- * of @page.
+ * mark_page_lazyfree() moves @page to the inactive file list.
+ * This is done to accelerate the reclaim of @page.
  */
-void deactivate_page(struct page *page)
+void mark_page_lazyfree(struct page *page)
 {
-       if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-               struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+       if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+           !PageUnevictable(page)) {
+               struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
 
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
-                       pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-               put_cpu_var(lru_deactivate_pvecs);
+                       pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+               put_cpu_var(lru_lazyfree_pvecs);
        }
 }
 
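mark_page_lazyfree() follows the same batching pattern as the other helpers in this file: pages are staged in a per-CPU pagevec, and the LRU manipulation (lru_lazyfree_fn() under the LRU lock) runs once per batch, triggered when pagevec_add() reports no space left or immediately for compound pages. Reduced to a userspace sketch, with illustrative names and a fixed batch size standing in for PAGEVEC_SIZE:

#include <stdio.h>

#define BATCH_SIZE 15	/* stands in for PAGEVEC_SIZE */

static int batch[BATCH_SIZE];
static int batch_count;

/* Models pagevec_lru_move_fn(): pay the locking cost once per batch. */
static void flush_batch(void)
{
	printf("flushing %d staged items under one lock acquisition\n",
	       batch_count);
	batch_count = 0;
}

/* Models pagevec_add(): add an item, return the space left afterwards. */
static int batch_add(int item)
{
	batch[batch_count++] = item;
	return BATCH_SIZE - batch_count;
}

int main(void)
{
	for (int i = 0; i < 40; i++)
		if (!batch_add(i))	/* full: drain now, like the kernel */
			flush_batch();
	flush_batch();			/* models a final explicit drain */
	return 0;
}
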
@@ -693,7 +710,7 @@ void lru_add_drain_all(void)
                if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
                    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
                    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
-                   pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+                   pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);
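
lru_add_drain_all() keeps its existing shape with the renamed pagevec: drain work is queued only on CPUs whose pagevecs actually hold pages, so idle CPUs are left alone. A userspace model of that schedule-only-where-needed loop, with pthreads standing in for per-CPU work items and every name below illustrative:

#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4		/* stands in for the online CPUs */

static int pending[NR_WORKERS];	/* models pagevec_count() per CPU */

/* Models lru_add_drain_per_cpu() running from the workqueue. */
static void *drain_one(void *arg)
{
	int cpu = (int)(long)arg;

	printf("cpu %d: draining %d staged pages\n", cpu, pending[cpu]);
	pending[cpu] = 0;
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];
	int queued[NR_WORKERS] = { 0 };

	pending[1] = 7;		/* pretend only CPU 1 has staged pages */
	pending[3] = 2;		/* ...and CPU 3 */

	/* Like lru_add_drain_all(): skip CPUs with nothing to drain. */
	for (long cpu = 0; cpu < NR_WORKERS; cpu++)
		if (pending[cpu]) {
			pthread_create(&tid[cpu], NULL, drain_one,
				       (void *)cpu);
			queued[cpu] = 1;
		}

	for (int cpu = 0; cpu < NR_WORKERS; cpu++)
		if (queued[cpu])
			pthread_join(tid[cpu], NULL);
	return 0;
}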