git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - mm/page_alloc.c
arm64: dts: marvell: armada-37xx: Align the compatible string
[karo-tx-linux.git] / mm / page_alloc.c
index d90792addeb973514d1556d23aab99836afed24d..f9e450c6b6e414d61b00d5a61be9cdea3b773e1b 100644 (file)
@@ -3283,14 +3283,15 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                enum compact_priority prio, enum compact_result *compact_result)
 {
        struct page *page;
+       unsigned int noreclaim_flag;
 
        if (!order)
                return NULL;
 
-       current->flags |= PF_MEMALLOC;
+       noreclaim_flag = memalloc_noreclaim_save();
        *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
                                                                        prio);
-       current->flags &= ~PF_MEMALLOC;
+       memalloc_noreclaim_restore(noreclaim_flag);
 
        if (*compact_result <= COMPACT_INACTIVE)
                return NULL;
@@ -3437,12 +3438,13 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 {
        struct reclaim_state reclaim_state;
        int progress;
+       unsigned int noreclaim_flag;
 
        cond_resched();
 
        /* We now go into synchronous reclaim */
        cpuset_memory_pressure_bump();
-       current->flags |= PF_MEMALLOC;
+       noreclaim_flag = memalloc_noreclaim_save();
        lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
        current->reclaim_state = &reclaim_state;
@@ -3452,7 +3454,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 
        current->reclaim_state = NULL;
        lockdep_clear_current_reclaim_state();
-       current->flags &= ~PF_MEMALLOC;
+       memalloc_noreclaim_restore(noreclaim_flag);
 
        cond_resched();
 
@@ -3665,6 +3667,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                struct alloc_context *ac)
 {
        bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
+       const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
        struct page *page = NULL;
        unsigned int alloc_flags;
        unsigned long did_some_progress;
@@ -3732,12 +3735,17 @@ retry_cpuset:
 
        /*
         * For costly allocations, try direct compaction first, as it's likely
-        * that we have enough base pages and don't need to reclaim. Don't try
-        * that for allocations that are allowed to ignore watermarks, as the
-        * ALLOC_NO_WATERMARKS attempt didn't yet happen.
+        * that we have enough base pages and don't need to reclaim. For non-
+        * movable high-order allocations, do that as well, as compaction will
+        * try prevent permanent fragmentation by migrating from blocks of the
+        * same migratetype.
+        * Don't try this for allocations that are allowed to ignore
+        * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
         */
-       if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER &&
-               !gfp_pfmemalloc_allowed(gfp_mask)) {
+       if (can_direct_reclaim &&
+                       (costly_order ||
+                          (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
+                       && !gfp_pfmemalloc_allowed(gfp_mask)) {
                page = __alloc_pages_direct_compact(gfp_mask, order,
                                                alloc_flags, ac,
                                                INIT_COMPACT_PRIORITY,
@@ -3749,7 +3757,7 @@ retry_cpuset:
                 * Checks for costly allocations with __GFP_NORETRY, which
                 * includes THP page fault allocations
                 */
-               if (gfp_mask & __GFP_NORETRY) {
+               if (costly_order && (gfp_mask & __GFP_NORETRY)) {
                        /*
                         * If compaction is deferred for high-order allocations,
                         * it is because sync compaction recently failed. If
@@ -3830,7 +3838,7 @@ retry:
         * Do not retry costly high order allocations unless they are
         * __GFP_REPEAT
         */
-       if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
+       if (costly_order && !(gfp_mask & __GFP_REPEAT))
                goto nopage;
 
        if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,