        unsigned int order;
        int prefetch_nr = READ_ONCE(pcp->batch);
        bool isolated_pageblocks;
-       struct page *page, *tmp;
-       LIST_HEAD(head);
+       struct page *page;
 
        /*
         * Ensure proper count is passed which otherwise would stuck in the
         * below while (list_empty(list)) loop.
         */
        count = min(pcp->count, count);
 
        /* Ensure requested pindex is drained first. */
        pindex = pindex - 1;
 
+       /*
+        * local_lock_irq held so equivalent to spin_lock_irqsave for
+        * both PREEMPT_RT and non-PREEMPT_RT configurations.
+        */
+       spin_lock(&zone->lock);
+       isolated_pageblocks = has_isolate_pageblock(zone);
+
        while (count > 0) {
                struct list_head *list;
                int nr_pages;
 
                /* Remove pages from lists in a round-robin fashion. */
                do {
                        if (++pindex > max_pindex)
                                pindex = min_pindex;
                        list = &pcp->lists[pindex];
                        if (!list_empty(list))
                                break;
 
                        if (pindex == max_pindex)
                                max_pindex--;
                        if (pindex == min_pindex)
                                min_pindex++;
                } while (1);
 
                order = pindex_to_order(pindex);
                nr_pages = 1 << order;
                BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
                do {
+                       int mt;
+
                        page = list_last_entry(list, struct page, lru);
+                       mt = get_pcppage_migratetype(page);
+
                        /* must delete to avoid corrupting pcp list */
                        list_del(&page->lru);
                        count -= nr_pages;
                        if (bulkfree_pcp_prepare(page))
                                continue;
 
-                       /* Encode order with the migratetype */
-                       page->index <<= NR_PCP_ORDER_WIDTH;
-                       page->index |= order;
-
-                       list_add_tail(&page->lru, &head);
-
                        /*
                         * We are going to put the page back to the global
                         * pool, prefetch its buddy to speed up later access
                         * under zone->lock. It is believed the overhead of
                         * an additional test and calculating buddy_pfn here
                         * can be offset by reduced memory latency later. To
                         * avoid excessive prefetching due to large count, only
                         * prefetch buddy for the first pcp->batch nr of pages.
                         */
                        if (prefetch_nr) {
                                prefetch_buddy(page, order);
                                prefetch_nr--;
                        }
-               } while (count > 0 && !list_empty(list));
-       }
 
-       /*
-        * local_lock_irq held so equivalent to spin_lock_irqsave for
-        * both PREEMPT_RT and non-PREEMPT_RT configurations.
-        */
-       spin_lock(&zone->lock);
-       isolated_pageblocks = has_isolate_pageblock(zone);
+                       /* MIGRATE_ISOLATE page should not go to pcplists */
+                       VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+                       /* Pageblock could have been isolated meanwhile */
+                       if (unlikely(isolated_pageblocks))
+                               mt = get_pageblock_migratetype(page);
 
-       /*
-        * Use safe version since after __free_one_page(),
-        * page->lru.next will not point to original list.
-        */
-       list_for_each_entry_safe(page, tmp, &head, lru) {
-               int mt = get_pcppage_migratetype(page);
-
-               /* mt has been encoded with the order (see above) */
-               order = mt & NR_PCP_ORDER_MASK;
-               mt >>= NR_PCP_ORDER_WIDTH;
-
-               /* MIGRATE_ISOLATE page should not go to pcplists */
-               VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
-               /* Pageblock could have been isolated meanwhile */
-               if (unlikely(isolated_pageblocks))
-                       mt = get_pageblock_migratetype(page);
-
-               __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
-               trace_mm_page_pcpu_drain(page, order, mt);
+                       __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
+                       trace_mm_page_pcpu_drain(page, order, mt);
+               } while (count > 0 && !list_empty(list));
        }
+
        spin_unlock(&zone->lock);
 }
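
The structural effect of the hunks above is that free_pcppages_bulk() now hands each page straight to the buddy free path while zone->lock is held, instead of first collecting pages on a temporary "head" list (with the order encoded into page->index) and walking that list in a second pass under the lock. Below is a minimal, self-contained userspace sketch of that single-pass shape only; it is not kernel code. Every toy_* name is invented for illustration, toy_free_one() merely stands in for __free_one_page(), and a pthread mutex stands in for zone->lock.

/*
 * Toy sketch only: demonstrates "free directly under the zone lock in one
 * pass" versus building a temporary list and freeing in a second walk.
 * All toy_* names are hypothetical stand-ins, not kernel interfaces.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_page {
	struct toy_page *next;
	int id;
};

struct toy_zone {
	pthread_mutex_t lock;	/* stand-in for zone->lock */
	long nr_free;
};

/* Stand-in for __free_one_page(); called with the zone lock held. */
static void toy_free_one(struct toy_zone *zone, struct toy_page *page)
{
	zone->nr_free++;
	printf("freed page %d\n", page->id);
}

/*
 * Single pass, mirroring the patched free_pcppages_bulk(): take the lock
 * once, then unlink each page from the per-cpu style list and free it
 * immediately, with no intermediate list and no second walk.
 */
static void toy_free_bulk(struct toy_zone *zone, struct toy_page **list, int count)
{
	pthread_mutex_lock(&zone->lock);
	while (count > 0 && *list) {
		struct toy_page *page = *list;

		*list = page->next;		/* delete from the pcp-style list */
		count--;
		toy_free_one(zone, page);	/* free directly under the lock */
	}
	pthread_mutex_unlock(&zone->lock);
}

int main(void)
{
	struct toy_zone zone = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.nr_free = 0,
	};
	struct toy_page pages[3] = {
		{ .next = &pages[1], .id = 0 },
		{ .next = &pages[2], .id = 1 },
		{ .next = NULL,      .id = 2 },
	};
	struct toy_page *list = &pages[0];

	toy_free_bulk(&zone, &list, 3);	/* build with: cc -pthread toy.c */
	printf("nr_free = %ld\n", zone.nr_free);
	return 0;
}

The sketch deliberately omits what the real patch keeps inside the same pass: the get_pcppage_migratetype()/isolation checks, the VM_BUG_ON_PAGE(), and the trace_mm_page_pcpu_drain() tracepoint. It also does not model the locking nuance the added comment documents, namely that local_lock_irq is already held so a plain spin_lock() on zone->lock is equivalent to spin_lock_irqsave() on both PREEMPT_RT and non-PREEMPT_RT.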