  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
+static int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
-                         int migratetype)
+                         int migratetype, int *num_movable)
 {
        struct page *page;
        unsigned int order;
        VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
 
+       if (num_movable)
+               *num_movable = 0;
+
        for (page = start_page; page <= end_page;) {
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
 
                if (!PageBuddy(page)) {
+                       /*
+                        * We assume that pages that could be isolated for
+                        * migration are movable. But we don't actually try
+                        * isolating, as that would be expensive.
+                        */
+                       if (num_movable &&
+                                       (PageLRU(page) || __PageMovable(page)))
+                               (*num_movable)++;
+
                        page++;
                        continue;
                }
 }
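
The counting added above never isolates anything: PageLRU() or __PageMovable()
is used as a cheap proxy for "this allocated page could probably be migrated
away". Below is a minimal userspace sketch of that walk, not kernel code: the
struct, its field names and move_range() are invented for illustration, and
the real move_freepages() additionally moves the free pages between free
lists, skips invalid pfns and steps over higher-order buddies (trimmed from
this hunk).

/*
 * Standalone illustration: free (buddy) pages are "moved", and for the
 * allocated pages we only guess movability instead of attempting isolation,
 * because isolation would be expensive.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool buddy;		/* still free in the buddy allocator         */
	bool lru;		/* on an LRU list -> assumed movable         */
	bool non_lru_movable;	/* __PageMovable()-style page, e.g. zsmalloc */
};

/* Walk [start, end] inclusive, like move_freepages() does. */
static int move_range(struct fake_page *start, struct fake_page *end,
		      int *num_movable)
{
	int pages_moved = 0;

	if (num_movable)
		*num_movable = 0;

	for (struct fake_page *p = start; p <= end; p++) {
		if (!p->buddy) {
			/* Allocated page: record a movability guess only. */
			if (num_movable && (p->lru || p->non_lru_movable))
				(*num_movable)++;
			continue;
		}
		pages_moved++;	/* free page "moved" to the target list */
	}
	return pages_moved;
}

int main(void)
{
	struct fake_page block[8] = {
		{ .buddy = true }, { .lru = true }, { .non_lru_movable = true },
		{ .buddy = true }, { 0 },           { .lru = true },
		{ .buddy = true }, { 0 },
	};
	int movable;
	int freed = move_range(block, block + 7, &movable);

	/* prints "moved 3 free pages, 3 allocated pages look movable" */
	printf("moved %d free pages, %d allocated pages look movable\n",
	       freed, movable);
	return 0;
}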
 
 int move_freepages_block(struct zone *zone, struct page *page,
-                               int migratetype)
+                               int migratetype, int *num_movable)
 {
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;
        if (!zone_spans_pfn(zone, end_pfn))
                return 0;
 
-       return move_freepages(zone, start_page, end_page, migratetype);
+       return move_freepages(zone, start_page, end_page, migratetype,
+                                                               num_movable);
 }
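
The wrapper above only forwards the new num_movable pointer; its existing job,
and the reason the comment at the top of this section sends callers here, is
to align the range to a pageblock before calling move_freepages(). That
alignment is on lines trimmed from this hunk and amounts to rounding the pfn
down to a pageblock boundary. A standalone sketch of the arithmetic, assuming
a 512-page pageblock (pageblock_order = 9, a common but config-dependent
value):

/* Rounding sketch only; 512 pages per pageblock is an assumed example. */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES	512UL

int main(void)
{
	unsigned long pfn = 70000;	/* arbitrary example pfn */
	unsigned long start_pfn = pfn & ~(PAGEBLOCK_NR_PAGES - 1);
	unsigned long end_pfn = start_pfn + PAGEBLOCK_NR_PAGES - 1;

	/* prints "pfn 70000 -> pageblock [69632, 70143]" */
	printf("pfn %lu -> pageblock [%lu, %lu]\n", pfn, start_pfn, end_pfn);
	return 0;
}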
 
 static void change_pageblock_range(struct page *pageblock_page,
 /*
  * This function implements actual steal behaviour. If order is large enough,
  * we can steal whole pageblock. If not, we first move freepages in this
- * pageblock and check whether half of pages are moved or not. If half of
- * pages are moved, we can change migratetype of pageblock and permanently
- * use it's pages as requested migratetype in the future.
+ * pageblock to our migratetype and determine how many already-allocated pages
+ * there are in the pageblock with a compatible migratetype. If at least half
+ * of the pages are free or compatible, we can change the migratetype of the
+ * pageblock itself, so pages freed in the future go to the correct free list.
  */
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
                                        int start_type, bool whole_block)
 {
        unsigned int current_order = page_order(page);
        struct free_area *area;
-       int pages;
+       int free_pages, movable_pages, alike_pages;
+       int old_block_type;
+
+       old_block_type = get_pageblock_migratetype(page);
 
        /*
         * This can happen due to races and we want to prevent broken
         * highatomic accounting.
         */
-       if (is_migrate_highatomic_page(page))
+       if (is_migrate_highatomic(old_block_type))
                goto single_page;
 
        /* Take ownership for orders >= pageblock_order */
        if (!whole_block)
                goto single_page;
 
-       pages = move_freepages_block(zone, page, start_type);
+       free_pages = move_freepages_block(zone, page, start_type,
+                                               &movable_pages);
+       /*
+        * Determine how many pages are compatible with our allocation.
+        * For movable allocation, it's the number of movable pages which
+        * we just obtained. For other types it's a bit more tricky.
+        */
+       if (start_type == MIGRATE_MOVABLE) {
+               alike_pages = movable_pages;
+       } else {
+               /*
+                * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
+                * to MOVABLE pageblock, consider all non-movable pages as
+                * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
+                * vice versa, be conservative since we can't distinguish the
+                * exact migratetype of non-movable pages.
+                */
+               if (old_block_type == MIGRATE_MOVABLE)
+                       alike_pages = pageblock_nr_pages
+                                               - (free_pages + movable_pages);
+               else
+                       alike_pages = 0;
+       }
+
        /* moving whole block can fail due to zone boundary conditions */
-       if (!pages)
+       if (!free_pages)
                goto single_page;
 
-       /* Claim the whole block if over half of it is free */
-       if (pages >= (1 << (pageblock_order-1)) ||
+       /*
+        * If a sufficient number of pages in the block are either free or of
+        * comparable migratability to our allocation, claim the whole block.
+        */
+       if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
                        page_group_by_mobility_disabled)
                set_pageblock_migratetype(page, start_type);
 
            && !is_migrate_cma(mt)) {
                zone->nr_reserved_highatomic += pageblock_nr_pages;
                set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
-               move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
+               move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
        }
 
 out_unlock:
                         * may increase.
                         */
                        set_pageblock_migratetype(page, ac->migratetype);
-                       ret = move_freepages_block(zone, page, ac->migratetype);
+                       ret = move_freepages_block(zone, page, ac->migratetype,
+                                                                       NULL);
                        if (ret) {
                                spin_unlock_irqrestore(&zone->lock, flags);
                                return ret;