mm: do not use page_count() without a page pin
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44030096da631b5f49b4ee1a81d0b9ace59f5c7e..bb790f5919e38ac92242816eb60c9d5b7e7dba9c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1158,8 +1158,10 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
-       free_pcppages_bulk(zone, to_drain, pcp);
-       pcp->count -= to_drain;
+       if (to_drain > 0) {
+               free_pcppages_bulk(zone, to_drain, pcp);
+               pcp->count -= to_drain;
+       }
        local_irq_restore(flags);
 }
 #endif
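
The hunk above keeps drain_zone_pages() from handing a zero count to free_pcppages_bulk(): when the per-CPU list is empty, to_drain works out to 0 and there is nothing to free, so both the bulk free and the pcp->count adjustment now sit behind the to_drain > 0 check. The same guard is added in the __zone_pcp_update() hunk further down. For context, a sketch of how the whole function reads after this change; the lines outside the hunk (the flags variable, local_irq_save() and the count-versus-batch test) are reconstructed as an assumption, not taken from this diff:

void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
        unsigned long flags;
        int to_drain;

        local_irq_save(flags);
        if (pcp->count >= pcp->batch)   /* assumed; not shown in the hunk */
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
        if (to_drain > 0) {             /* new guard: skip empty lists */
                free_pcppages_bulk(zone, to_drain, pcp);
                pcp->count -= to_drain;
        }
        local_irq_restore(flags);
}
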
@@ -1529,16 +1531,16 @@ static int __init setup_fail_page_alloc(char *str)
 }
 __setup("fail_page_alloc=", setup_fail_page_alloc);
 
-static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
        if (order < fail_page_alloc.min_order)
-               return 0;
+               return false;
        if (gfp_mask & __GFP_NOFAIL)
-               return 0;
+               return false;
        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
-               return 0;
+               return false;
        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
-               return 0;
+               return false;
 
        return should_fail(&fail_page_alloc.attr, 1 << order);
 }
@@ -1578,9 +1580,9 @@ late_initcall(fail_page_alloc_debugfs);
 
 #else /* CONFIG_FAIL_PAGE_ALLOC */
 
-static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
-       return 0;
+       return false;
 }
 
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
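
The two should_fail_alloc_page() hunks are a type cleanup: the function is a yes/no predicate for the fault-injection framework, so it now returns bool, and the stub compiled in when CONFIG_FAIL_PAGE_ALLOC is off simply returns false (never inject a failure). The call site reads like any other predicate; a short sketch of how the allocator uses it, assuming the __alloc_pages_nodemask() entry point of this kernel generation (not shown in this diff):

        if (should_fail_alloc_page(gfp_mask, order))
                return NULL;    /* fault injection: pretend the allocation failed */
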
@@ -3915,7 +3917,8 @@ static int __zone_pcp_update(void *data)
                pcp = &pset->pcp;
 
                local_irq_save(flags);
-               free_pcppages_bulk(zone, pcp->count, pcp);
+               if (pcp->count > 0)
+                       free_pcppages_bulk(zone, pcp->count, pcp);
                setup_pageset(pset, batch);
                local_irq_restore(flags);
        }
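
This is the same empty-list guard as in drain_zone_pages(): __zone_pcp_update() walks every CPU's pageset, and a given CPU's list may hold no pages, so free_pcppages_bulk() is only called when pcp->count is positive, before the pageset is re-initialised with setup_pageset(). A sketch of the surrounding per-CPU loop, reconstructed as an assumption for context (only the guarded call comes from this diff):

        for_each_possible_cpu(cpu) {
                struct per_cpu_pageset *pset = per_cpu_ptr(zone->pageset, cpu);
                struct per_cpu_pages *pcp = &pset->pcp;

                local_irq_save(flags);
                if (pcp->count > 0)     /* the list may already be empty */
                        free_pcppages_bulk(zone, pcp->count, pcp);
                setup_pageset(pset, batch);
                local_irq_restore(flags);
        }
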
@@ -5497,11 +5500,18 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
                        continue;
 
                page = pfn_to_page(check);
-               if (!page_count(page)) {
+               /*
+                * We can't use page_count without pinning the page
+                * because another CPU can free the compound page.
+                * This check already skips compound tails of THP
+                * because their page->_count is zero at all times.
+                */
+               if (!atomic_read(&page->_count)) {
                        if (PageBuddy(page))
                                iter += (1 << page_order(page)) - 1;
                        continue;
                }
+
                if (!PageLRU(page))
                        found++;
                /*
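
The __count_immobile_pages() hunk replaces page_count() with a raw atomic_read() of page->_count because the scan holds no reference on the page. page_count() goes through compound_head(page) before reading the refcount, and without a pin another CPU can free (or split) the compound page while that head pointer is being chased; reading _count directly never leaves the struct page being examined, and THP tail pages are still skipped because their _count stays at zero. For reference, the helper being avoided, quoted as an assumption from include/linux/mm.h of this kernel generation:

        static inline int page_count(struct page *page)
        {
                /* follows the compound head first - unsafe without a pin */
                return atomic_read(&compound_head(page)->_count);
        }
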
@@ -5635,7 +5645,12 @@ static struct page *
 __alloc_contig_migrate_alloc(struct page *page, unsigned long private,
                             int **resultp)
 {
-       return alloc_page(GFP_HIGHUSER_MOVABLE);
+       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+       if (PageHighMem(page))
+               gfp_mask |= __GFP_HIGHMEM;
+
+       return alloc_page(gfp_mask);
 }
 
 /* [start, end) must belong to a single zone. */
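
The last hunk changes the page allocated as a migration target for contiguous allocation: instead of always permitting highmem via GFP_HIGHUSER_MOVABLE, __GFP_HIGHMEM is added only when the page being migrated is itself a highmem page, so lowmem pages are never replaced by highmem ones that the kernel cannot reach through the direct map. Assuming the gfp.h of this kernel generation, where GFP_HIGHUSER_MOVABLE amounts to GFP_USER plus __GFP_HIGHMEM plus __GFP_MOVABLE, the new behaviour is equivalent to this hypothetical helper (illustrative name, not part of the diff):

        static gfp_t contig_migrate_gfp(struct page *src)
        {
                /* highmem sources keep the old GFP_HIGHUSER_MOVABLE behaviour;
                 * lowmem sources lose only the unconditional __GFP_HIGHMEM */
                return PageHighMem(src) ? GFP_HIGHUSER_MOVABLE
                                        : GFP_USER | __GFP_MOVABLE;
        }
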