page_poisoning_enabled() && poisoned;
 }
 
-static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+#ifdef CONFIG_DEBUG_VM
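+/*
+ * With CONFIG_DEBUG_VM, order-0 pages are checked as they are allocated
+ * from the pcp lists (check_new_pcp) and the refill-time check is skipped.
+ * Without it, pages are checked only when the pcp lists are refilled in
+ * bulk (check_pcp_refill) and the allocation fast path skips the check.
+ */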
+static bool check_pcp_refill(struct page *page)
+{
+       return false;
+}
+
+static bool check_new_pcp(struct page *page)
+{
+       return check_new_page(page);
+}
+#else
+static bool check_pcp_refill(struct page *page)
+{
+       return check_new_page(page);
+}
+static bool check_new_pcp(struct page *page)
+{
+       return false;
+}
+#endif /* CONFIG_DEBUG_VM */
+
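+/* Check each page of a high-order block taken straight from the free lists */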
+static bool check_new_pages(struct page *page, unsigned int order)
+{
+       int i;
+       for (i = 0; i < (1 << order); i++) {
+               struct page *p = page + i;
+
+               if (unlikely(check_new_page(p)))
+                       return true;
+       }
+
+       return false;
+}
+
+static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                                                        unsigned int alloc_flags)
 {
        int i;
        bool poisoned = true;
 
        for (i = 0; i < (1 << order); i++) {
                struct page *p = page + i;
-               if (unlikely(check_new_page(p)))
-                       return 1;
                if (poisoned)
                        poisoned &= page_is_poisoned(p);
        }
        if (alloc_flags & ALLOC_NO_WATERMARKS)
                set_page_pfmemalloc(page);
        else
                clear_page_pfmemalloc(page);
-
-       return 0;
 }
 
 /*
                if (unlikely(page == NULL))
                        break;
 
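+               /* Do not add a page that fails its state check to the pcp list */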
+               if (unlikely(check_pcp_refill(page)))
+                       continue;
+
                /*
                 * Split buddy pages returned by expand() are received here
                 * in physical page order. The page is added to the callers and
                struct list_head *list;
 
                local_irq_save(flags);
-               pcp = &this_cpu_ptr(zone->pageset)->pcp;
-               list = &pcp->lists[migratetype];
-               if (list_empty(list)) {
-                       pcp->count += rmqueue_bulk(zone, 0,
-                                       pcp->batch, list,
-                                       migratetype, cold);
-                       if (unlikely(list_empty(list)))
-                               goto failed;
-               }
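+               /* Retry until the page taken from the pcp list passes check_new_pcp() */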
+               do {
+                       pcp = &this_cpu_ptr(zone->pageset)->pcp;
+                       list = &pcp->lists[migratetype];
+                       if (list_empty(list)) {
+                               pcp->count += rmqueue_bulk(zone, 0,
+                                               pcp->batch, list,
+                                               migratetype, cold);
+                               if (unlikely(list_empty(list)))
+                                       goto failed;
+                       }
 
-               if (cold)
-                       page = list_last_entry(list, struct page, lru);
-               else
-                       page = list_first_entry(list, struct page, lru);
+                       if (cold)
+                               page = list_last_entry(list, struct page, lru);
+                       else
+                               page = list_first_entry(list, struct page, lru);
 
-               __dec_zone_state(zone, NR_ALLOC_BATCH);
-               list_del(&page->lru);
-               pcp->count--;
+                       __dec_zone_state(zone, NR_ALLOC_BATCH);
+                       list_del(&page->lru);
+                       pcp->count--;
+               } while (check_new_pcp(page));
                WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
                spin_lock_irqsave(&zone->lock, flags);
 
-               page = NULL;
-               if (alloc_flags & ALLOC_HARDER) {
-                       page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
-                       if (page)
-                               trace_mm_page_alloc_zone_locked(page, order, migratetype);
-               }
-               if (!page)
-                       page = __rmqueue(zone, order, migratetype);
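+               /* Retry if any page of the high-order block fails its state check */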
+               do {
+                       page = NULL;
+                       if (alloc_flags & ALLOC_HARDER) {
+                               page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
+                               if (page)
+                                       trace_mm_page_alloc_zone_locked(page, order, migratetype);
+                       }
+                       if (!page)
+                               page = __rmqueue(zone, order, migratetype);
+               } while (page && check_new_pages(page, order));
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
                page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
                                gfp_mask, alloc_flags, ac->migratetype);
                if (page) {
-                       if (prep_new_page(page, order, gfp_mask, alloc_flags))
-                               goto try_this_zone;
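+                       /* Checks now happen before the page leaves the free lists */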
+                       prep_new_page(page, order, gfp_mask, alloc_flags);
 
                        /*
                         * If this is a high-order atomic allocation then check