extern const struct trace_print_flags vmaflag_names[];
 extern const struct trace_print_flags gfpflag_names[];
 
+/* True if @migratetype is the highatomic reserve type. */
+static inline bool is_migrate_highatomic(enum migratetype migratetype)
+{
+       return migratetype == MIGRATE_HIGHATOMIC;
+}
+
+/*
+ * True if @page sits in a highatomic pageblock. Routed through
+ * is_migrate_highatomic() so the MIGRATE_HIGHATOMIC comparison
+ * has a single definition point.
+ */
+static inline bool is_migrate_highatomic_page(struct page *page)
+{
+       return is_migrate_highatomic(get_pageblock_migratetype(page));
+}
+
 #endif /* __MM_INTERNAL_H */
 
 
        /* Yoink! */
        mt = get_pageblock_migratetype(page);
-       if (mt != MIGRATE_HIGHATOMIC &&
-                       !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
+       if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
+           && !is_migrate_cma(mt)) {
                zone->nr_reserved_highatomic += pageblock_nr_pages;
                set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
                move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
                         * from highatomic to ac->migratetype. So we should
                         * adjust the count once.
                         */
-                       if (get_pageblock_migratetype(page) ==
-                                                       MIGRATE_HIGHATOMIC) {
+                       if (is_migrate_highatomic_page(page)) {
                                /*
                                 * It should never happen but changes to
                                 * locking could inadvertently allow a per-cpu
 
                page = list_first_entry(&area->free_list[fallback_mt],
                                                struct page, lru);
-               if (can_steal &&
-                       get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
+               if (can_steal && !is_migrate_highatomic_page(page))
                        steal_suitable_fallback(zone, page, start_migratetype);
 
                /* Remove the page from the freelists */
        /*
         * We only track unmovable, reclaimable and movable on pcp lists.
         * Free ISOLATE pages back to the allocator because they are being
-        * offlined but treat RESERVE as movable pages so we can get those
+        * offlined but treat HIGHATOMIC as movable pages so we can get those
         * areas back if necessary. Otherwise, we may have to free
         * excessively into the page allocator
         */
                for (; page < endpage; page += pageblock_nr_pages) {
                        int mt = get_pageblock_migratetype(page);
                        if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
-                               && mt != MIGRATE_HIGHATOMIC)
+                           && !is_migrate_highatomic(mt))
                                set_pageblock_migratetype(page,
                                                          MIGRATE_MOVABLE);
                }