/* Direct compactor: Is a suitable page free? */
        for (order = cc->order; order < MAX_ORDER; order++) {
                struct free_area *area = &zone->free_area[order];
+               bool can_steal;
 
                /* Job done if page is free of the right migratetype */
                if (!list_empty(&area->free_list[migratetype]))
                        return COMPACT_PARTIAL;
 
-               /* Job done if allocation would set block type */
-               if (order >= pageblock_order && area->nr_free)
+#ifdef CONFIG_CMA
+               /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
+               if (migratetype == MIGRATE_MOVABLE &&
+                       !list_empty(&area->free_list[MIGRATE_CMA]))
+                       return COMPACT_PARTIAL;
+#endif
+               /*
+                * Job done if allocation would steal freepages from
+                * other migratetype buddy lists.
+                */
+               if (find_suitable_fallback(area, order, migratetype,
+                                               true, &can_steal) != -1)
                        return COMPACT_PARTIAL;
        }
 
 
 unsigned long
 isolate_migratepages_range(struct compact_control *cc,
                           unsigned long low_pfn, unsigned long end_pfn);
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+                       int migratetype, bool only_stealable, bool *can_steal);
 
 #endif
 
 
                set_pageblock_migratetype(page, start_type);
 }
 
-/* Check whether there is a suitable fallback freepage with requested order. */
-static int find_suitable_fallback(struct free_area *area, unsigned int order,
-                                       int migratetype, bool *can_steal)
+/*
+ * Check whether there is a suitable fallback freepage with requested order.
+ * If only_stealable is true, this function returns fallback_mt only if
+ * we can steal other freepages altogether. This would help to reduce
+ * fragmentation due to mixed migratetype pages in one pageblock.
+ */
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+                       int migratetype, bool only_stealable, bool *can_steal)
 {
        int i;
        int fallback_mt;
                if (can_steal_fallback(order, migratetype))
                        *can_steal = true;
 
-               return fallback_mt;
+               if (!only_stealable)
+                       return fallback_mt;
+
+               if (*can_steal)
+                       return fallback_mt;
        }
 
        return -1;
                                --current_order) {
                area = &(zone->free_area[current_order]);
                fallback_mt = find_suitable_fallback(area, current_order,
-                               start_migratetype, &can_steal);
+                               start_migratetype, false, &can_steal);
                if (fallback_mt == -1)
                        continue;