zsmalloc: introduce zspage structure
diff --git a/mm/compaction.c b/mm/compaction.c
index 79bfe0e06907ac91875dddc9a3fbcf579b0c4d51..6095055bd70f8d2b3cf74b6f45a17b2ff9d07675 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -15,7 +15,6 @@
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
-#include <linux/balloon_compaction.h>
 #include <linux/page-isolation.h>
 #include <linux/kasan.h>
 #include <linux/kthread.h>
@@ -81,6 +80,44 @@ static inline bool migrate_async_suitable(int migratetype)
 
 #ifdef CONFIG_COMPACTION
 
+int PageMovable(struct page *page)
+{
+       struct address_space *mapping;
+
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       if (!__PageMovable(page))
+               return 0;
+
+       mapping = page_mapping(page);
+       if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
+               return 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(PageMovable);
+
+void __SetPageMovable(struct page *page, struct address_space *mapping)
+{
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
+       page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__SetPageMovable);
+
+void __ClearPageMovable(struct page *page)
+{
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(!PageMovable(page), page);
+       /*
+        * Clear the registered address_space value while keeping the
+        * PAGE_MAPPING_MOVABLE flag, so that the VM can catch a page the
+        * driver released after isolation and not try to put it back.
+        */
+       page->mapping = (void *)((unsigned long)page->mapping &
+                               PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__ClearPageMovable);
+
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
 
@@ -670,7 +707,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
        /* Time to isolate some pages for migration */
        for (; low_pfn < end_pfn; low_pfn++) {
-               bool is_lru;
 
                if (skip_on_failure && low_pfn >= next_skip_pfn) {
                        /*
@@ -732,21 +768,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        continue;
                }
 
-               /*
-                * Check may be lockless but that's ok as we recheck later.
-                * It's possible to migrate LRU pages and balloon pages
-                * Skip any other type of page
-                */
-               is_lru = PageLRU(page);
-               if (!is_lru) {
-                       if (unlikely(balloon_page_movable(page))) {
-                               if (balloon_page_isolate(page)) {
-                                       /* Successfully isolated */
-                                       goto isolate_success;
-                               }
-                       }
-               }
-
                /*
                 * Regardless of being on LRU, compound pages such as THP and
                 * hugetlbfs are not to be compacted. We can potentially save
@@ -763,8 +784,30 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        goto isolate_fail;
                }
 
-               if (!is_lru)
+               /*
+                * The check may be lockless, but that's ok as we recheck
+                * later.  It's possible to migrate LRU and non-LRU movable
+                * pages; skip any other type of page.
+                */
+               if (!PageLRU(page)) {
+                       /*
+                        * __PageMovable can return a false positive, so we
+                        * need to verify it under the page lock.
+                        */
+                       if (unlikely(__PageMovable(page)) &&
+                                       !PageIsolated(page)) {
+                               if (locked) {
+                                       spin_unlock_irqrestore(&zone->lru_lock,
+                                                                       flags);
+                                       locked = false;
+                               }
+
+                               if (isolate_movable_page(page, isolate_mode))
+                                       goto isolate_success;
+                       }
+
                        goto isolate_fail;
+               }
 
                /*
                 * Migration will fail if an anonymous page is pinned in memory,
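
The false-positive caveat exists because __PageMovable() is only a
lockless test of tag bits in page->mapping, which can change under us;
PageMovable() then re-verifies the isolate_page a_op under the page
lock.  The helper lives in include/linux/page-flags.h in the same
series; roughly (quoted from memory, treat as a sketch):

        #define PAGE_MAPPING_ANON       0x1
        #define PAGE_MAPPING_MOVABLE    0x2
        #define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

        static __always_inline int __PageMovable(struct page *page)
        {
                /* Only the MOVABLE bit set: anon|movable would be KSM. */
                return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                                        PAGE_MAPPING_MOVABLE;
        }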
@@ -1009,8 +1052,6 @@ static void isolate_freepages(struct compact_control *cc)
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages,
                                isolate_start_pfn = block_start_pfn) {
-               unsigned long isolated;
-
                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
@@ -1034,36 +1075,30 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;
 
                /* Found a block suitable for isolating free pages from. */
-               isolated = isolate_freepages_block(cc, &isolate_start_pfn,
-                                               block_end_pfn, freelist, false);
-               /* If isolation failed early, do not continue needlessly */
-               if (!isolated && isolate_start_pfn < block_end_pfn &&
-                   cc->nr_migratepages > cc->nr_freepages)
-                       break;
+               isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+                                       freelist, false);
 
                /*
-                * If we isolated enough freepages, or aborted due to async
-                * compaction being contended, terminate the loop.
-                * Remember where the free scanner should restart next time,
-                * which is where isolate_freepages_block() left off.
-                * But if it scanned the whole pageblock, isolate_start_pfn
-                * now points at block_end_pfn, which is the start of the next
-                * pageblock.
-                * In that case we will however want to restart at the start
-                * of the previous pageblock.
+                * If we isolated enough freepages, or aborted due to lock
+                * contention, terminate.
                 */
                if ((cc->nr_freepages >= cc->nr_migratepages)
                                                        || cc->contended) {
-                       if (isolate_start_pfn >= block_end_pfn)
+                       if (isolate_start_pfn >= block_end_pfn) {
+                               /*
+                                * Restart at the previous pageblock if more
+                                * freepages can be isolated next time.
+                                */
                                isolate_start_pfn =
                                        block_start_pfn - pageblock_nr_pages;
+                       }
                        break;
-               } else {
+               } else if (isolate_start_pfn < block_end_pfn) {
                        /*
-                        * isolate_freepages_block() should not terminate
-                        * prematurely unless contended, or isolated enough
+                        * If isolation failed early, do not continue
+                        * needlessly.
                         */
-                       VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+                       break;
                }
        }
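
The restart bookkeeping at the end of this loop is easiest to see with
concrete numbers.  A standalone illustration in plain userspace C
(hypothetical pfn values, not kernel code):

        #include <stdio.h>

        int main(void)
        {
                /* 4K pages, order-9 pageblocks => 512 pages per block. */
                unsigned long pageblock_nr_pages = 512;
                unsigned long block_start_pfn = 0x1000;
                unsigned long block_end_pfn = block_start_pfn + pageblock_nr_pages;
                /* The scanner swept the whole block... */
                unsigned long isolate_start_pfn = block_end_pfn;

                /* ...so the next round restarts one pageblock earlier. */
                if (isolate_start_pfn >= block_end_pfn)
                        isolate_start_pfn = block_start_pfn - pageblock_nr_pages;

                printf("restart free scanner at pfn %#lx\n", isolate_start_pfn);
                return 0;
        }

This prints 0xe00, the start of the pageblock preceding 0x1000, which is
exactly what the isolate_start_pfn assignment above computes.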