mm/memory-hotplug: switch locking to a percpu rwsem
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 5092e4ef00c832fefb624a1d9d8f618402ad4402..757410d9f758a22ca6306b84d00c5929dc3fa79a 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,6 +8,7 @@
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
 #include <linux/page_owner.h>
+#include <linux/migrate.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
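
The new include is needed because alloc_migrate_target() (last hunk below) now calls
new_page_nodemask(), which is declared in <linux/migrate.h>. A hedged sketch of the
assumed prototype; verify against include/linux/migrate.h in your tree:

	/* Assumed 4.13-era declaration from <linux/migrate.h>; not part of this patch. */
	struct page *new_page_nodemask(struct page *page, int preferred_nid,
				       nodemask_t *nodemask);
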
@@ -138,12 +139,18 @@ static inline struct page *
 __first_valid_page(unsigned long pfn, unsigned long nr_pages)
 {
        int i;
-       for (i = 0; i < nr_pages; i++)
-               if (pfn_valid_within(pfn + i))
-                       break;
-       if (unlikely(i == nr_pages))
-               return NULL;
-       return pfn_to_page(pfn + i);
+
+       for (i = 0; i < nr_pages; i++) {
+               struct page *page;
+
+               if (!pfn_valid_within(pfn + i))
+                       continue;
+               page = pfn_to_online_page(pfn + i);
+               if (!page)
+                       continue;
+               return page;
+       }
+       return NULL;
 }
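
With this hunk, __first_valid_page() skips any pfn whose backing memory section is not
online instead of only checking pfn_valid_within(), so isolation code never dereferences
struct pages that belong to offlined (or never onlined) memory. For reference, a hedged
paraphrase of what pfn_to_online_page() does; the real helper lives in
<linux/memory_hotplug.h> and the function name here is illustrative:

	/*
	 * Rough paraphrase, not the kernel's exact definition: only hand back
	 * a struct page when the sparsemem section covering the pfn exists
	 * and is marked online.
	 */
	static inline struct page *pfn_to_online_page_sketch(unsigned long pfn)
	{
		unsigned long nr = pfn_to_section_nr(pfn);

		if (nr < NR_MEM_SECTIONS && online_section_nr(nr))
			return pfn_to_page(pfn);
		return NULL;
	}
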
 
 /*
@@ -184,8 +191,12 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
-            pfn += pageblock_nr_pages)
-               unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
+            pfn += pageblock_nr_pages) {
+               struct page *page = pfn_to_online_page(pfn);
+               if (!page)
+                       continue;
+               unset_migratetype_isolate(page, migratetype);
+       }
 
        return -EBUSY;
 }
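
The undo path gets the same treatment: pageblocks whose pages went offline between
isolation and rollback are skipped rather than handed to unset_migratetype_isolate()
with a stale struct page. A hedged caller sketch of the isolate/undo pairing, assuming
the 4.13-era signatures (the MIGRATE_MOVABLE migratetype and the skip_hwpoisoned_pages
argument are illustrative, not taken from this patch):

	/* Hedged usage sketch; locking and error handling omitted. */
	if (start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, true))
		return -EBUSY;
	/* ... migrate or reclaim pages in [start_pfn, end_pfn) ... */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
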
@@ -284,20 +295,5 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 struct page *alloc_migrate_target(struct page *page, unsigned long private,
                                  int **resultp)
 {
-       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
-
-       /*
-        * TODO: allocate a destination hugepage from a nearest neighbor node,
-        * accordance with memory policy of the user process if possible. For
-        * now as a simple work-around, we use the next node for destination.
-        */
-       if (PageHuge(page))
-               return alloc_huge_page_node(page_hstate(compound_head(page)),
-                                           next_node_in(page_to_nid(page),
-                                                        node_online_map));
-
-       if (PageHighMem(page))
-               gfp_mask |= __GFP_HIGHMEM;
-
-       return alloc_page(gfp_mask);
+       return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
 }
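
alloc_migrate_target() collapses to a single call: new_page_nodemask() is meant to cover
the hugetlb and highmem cases that the removed open-coded logic handled, allocating the
destination page near the current node from all nodes that have memory. For context, a
hedged sketch of how such a new_page_t callback is typically handed to migrate_pages();
the list name and the migration reason are illustrative assumptions, not taken from this
patch:

	/* Hedged sketch: drain a set of isolated pages via alloc_migrate_target(). */
	LIST_HEAD(source_pages);
	/* ... isolated pages collected onto source_pages ... */
	ret = migrate_pages(&source_pages, alloc_migrate_target, NULL, 0,
			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);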