mm: use is_migrate_highatomic() to simplify the code
author     Xishi Qiu <qiuxishi@huawei.com>
           Wed, 3 May 2017 21:52:52 +0000 (14:52 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 3 May 2017 22:52:08 +0000 (15:52 -0700)
Introduce two helpers, is_migrate_highatomic() and is_migrate_highatomic_page().

Simplify the code; no functional changes.

[akpm@linux-foundation.org: use static inlines rather than macros, per mhocko]
Link: http://lkml.kernel.org/r/58B94F15.6060606@huawei.com
Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
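
The bracketed akpm note above records a review detail: an earlier revision of the patch defined the helpers as function-like macros, and Michal Hocko asked for static inlines instead, in line with the kernel coding style's general preference for inline functions over macros that resemble functions. A minimal sketch of the difference (the macro form is a hypothetical reconstruction of the pre-review version, not taken from this commit):

	/* Hypothetical pre-review form: a function-like macro performs
	 * no type checking on its argument. */
	#define is_migrate_highatomic(migratetype) \
		((migratetype) == MIGRATE_HIGHATOMIC)

	/* Merged form (mm/internal.h below): the static inline declares
	 * the argument as enum migratetype, so the compiler can check
	 * callers, and the helper behaves as an ordinary expression. */
	static inline bool is_migrate_highatomic(enum migratetype migratetype)
	{
		return migratetype == MIGRATE_HIGHATOMIC;
	}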
include/linux/mmzone.h
mm/internal.h
mm/page_alloc.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 04e0969966f67a98130de3e3062d83ebc6f63509..446cf68c1c09e295552dada06485523cfe3b03d2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,7 +35,7 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-enum {
+enum migratetype {
        MIGRATE_UNMOVABLE,
        MIGRATE_MOVABLE,
        MIGRATE_RECLAIMABLE,
diff --git a/mm/internal.h b/mm/internal.h
index a36719572eb9e59a493871238fff112b9b7a2486..04d08ef91224bfb8be0a6a2aed456281f7702c44 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -510,4 +510,14 @@ extern const struct trace_print_flags pageflag_names[];
 extern const struct trace_print_flags vmaflag_names[];
 extern const struct trace_print_flags gfpflag_names[];
 
+static inline bool is_migrate_highatomic(enum migratetype migratetype)
+{
+       return migratetype == MIGRATE_HIGHATOMIC;
+}
+
+static inline bool is_migrate_highatomic_page(struct page *page)
+{
+       return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
+}
+
 #endif /* __MM_INTERNAL_H */
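
The new pair follows the pattern of the existing CMA checks; for comparison, include/linux/mmzone.h of the same era carries roughly the following macros for MIGRATE_CMA (quoted from memory of that tree, so the exact wording may differ):

	#ifdef CONFIG_CMA
	#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
	#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
	#else
	#  define is_migrate_cma(migratetype) false
	#  define is_migrate_cma_page(_page) false
	#endif

Placing the highatomic helpers in mm/internal.h rather than mmzone.h is presumably enough here because, unlike the CMA helpers, their only callers are in mm/page_alloc.c.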
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f82beddbd96f8ea310e28d81c76f5dbf2157981f..34ac32428de81dd2ff76f17e618b721206b50ccf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2036,8 +2036,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
 
        /* Yoink! */
        mt = get_pageblock_migratetype(page);
-       if (mt != MIGRATE_HIGHATOMIC &&
-                       !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
+       if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
+           && !is_migrate_cma(mt)) {
                zone->nr_reserved_highatomic += pageblock_nr_pages;
                set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
                move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
@@ -2094,8 +2094,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
                         * from highatomic to ac->migratetype. So we should
                         * adjust the count once.
                         */
-                       if (get_pageblock_migratetype(page) ==
-                                                       MIGRATE_HIGHATOMIC) {
+                       if (is_migrate_highatomic_page(page)) {
                                /*
                                 * It should never happen but changes to
                                 * locking could inadvertently allow a per-cpu
@@ -2152,8 +2151,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 
                page = list_first_entry(&area->free_list[fallback_mt],
                                                struct page, lru);
-               if (can_steal &&
-                       get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
+               if (can_steal && !is_migrate_highatomic_page(page))
                        steal_suitable_fallback(zone, page, start_migratetype);
 
                /* Remove the page from the freelists */
@@ -2493,7 +2491,7 @@ void free_hot_cold_page(struct page *page, bool cold)
        /*
         * We only track unmovable, reclaimable and movable on pcp lists.
         * Free ISOLATE pages back to the allocator because they are being
-        * offlined but treat RESERVE as movable pages so we can get those
+        * offlined but treat HIGHATOMIC as movable pages so we can get those
         * areas back if necessary. Otherwise, we may have to free
         * excessively into the page allocator
         */
@@ -2603,7 +2601,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
                for (; page < endpage; page += pageblock_nr_pages) {
                        int mt = get_pageblock_migratetype(page);
                        if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
-                               && mt != MIGRATE_HIGHATOMIC)
+                           && !is_migrate_highatomic(mt))
                                set_pageblock_migratetype(page,
                                                          MIGRATE_MOVABLE);
                }