mm/page_alloc.c: restructure free-page stealing code and fix a bug
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ca3e9bd739cf553ec0f78c66586f3feaf715ad5..b09ce5fe0cd225ffbcf1e63817bef142b7d00654 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1008,6 +1008,52 @@ static void change_pageblock_range(struct page *pageblock_page,
        }
 }
 
+/*
+ * If breaking a large block of pages, move all free pages to the preferred
+ * allocation list. If falling back for a reclaimable kernel allocation, be
+ * more aggressive about taking ownership of free pages.
+ *
+ * On the other hand, never change migration type of MIGRATE_CMA pageblocks
+ * nor move CMA pages to different free lists. We don't want unmovable pages
+ * to be allocated from MIGRATE_CMA areas.
+ *
+ * Returns the new migratetype of the pageblock (or the same old migratetype
+ * if it was unchanged).
+ */
+static int try_to_steal_freepages(struct zone *zone, struct page *page,
+                                 int start_type, int fallback_type)
+{
+       int current_order = page_order(page);
+
+       if (is_migrate_cma(fallback_type))
+               return fallback_type;
+
+       /* Take ownership for orders >= pageblock_order */
+       if (current_order >= pageblock_order) {
+               change_pageblock_range(page, current_order, start_type);
+               return start_type;
+       }
+
+       if (current_order >= pageblock_order / 2 ||
+           start_type == MIGRATE_RECLAIMABLE ||
+           page_group_by_mobility_disabled) {
+               int pages;
+
+               pages = move_freepages_block(zone, page, start_type);
+
+               /* Claim the whole block if over half of it is free */
+               if (pages >= (1 << (pageblock_order-1)) ||
+                               page_group_by_mobility_disabled) {
+
+                       set_pageblock_migratetype(page, start_type);
+                       return start_type;
+               }
+
+       }
+
+       return fallback_type;
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
@@ -1015,7 +1061,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
        struct free_area *area;
        int current_order;
        struct page *page;
-       int migratetype, i;
+       int migratetype, new_type, i;
 
        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1; current_order >= order;
@@ -1035,51 +1081,28 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                                        struct page, lru);
                        area->nr_free--;
 
-                       /*
-                        * If breaking a large block of pages, move all free
-                        * pages to the preferred allocation list. If falling
-                        * back for a reclaimable kernel allocation, be more
-                        * aggressive about taking ownership of free pages
-                        *
-                        * On the other hand, never change migration
-                        * type of MIGRATE_CMA pageblocks nor move CMA
-                        * pages on different free lists. We don't
-                        * want unmovable pages to be allocated from
-                        * MIGRATE_CMA areas.
-                        */
-                       if (!is_migrate_cma(migratetype) &&
-                           (current_order >= pageblock_order / 2 ||
-                            start_migratetype == MIGRATE_RECLAIMABLE ||
-                            page_group_by_mobility_disabled)) {
-                               int pages;
-                               pages = move_freepages_block(zone, page,
-                                                               start_migratetype);
-
-                               /* Claim the whole block if over half of it is free */
-                               if (pages >= (1 << (pageblock_order-1)) ||
-                                               page_group_by_mobility_disabled)
-                                       set_pageblock_migratetype(page,
-                                                               start_migratetype);
-
-                               migratetype = start_migratetype;
-                       }
+                       new_type = try_to_steal_freepages(zone, page,
+                                                         start_migratetype,
+                                                         migratetype);
 
                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);
 
-                       /* Take ownership for orders >= pageblock_order */
-                       if (current_order >= pageblock_order &&
-                           !is_migrate_cma(migratetype))
-                               change_pageblock_range(page, current_order,
-                                                       start_migratetype);
-
+                       /*
+                        * Borrow the excess buddy pages as well, irrespective
+                        * of whether we stole freepages, or took ownership of
+                        * the pageblock or not.
+                        *
+                        * Exception: When borrowing from MIGRATE_CMA, release
+                        * the excess buddy pages to CMA itself.
+                        */
                        expand(zone, page, order, current_order, area,
                               is_migrate_cma(migratetype)
                             ? migratetype : start_migratetype);
 
                        trace_mm_page_alloc_extfrag(page, order, current_order,
-                               start_migratetype, migratetype);
+                               start_migratetype, new_type);
 
                        return page;
                }