}
 
 /*
- * Frees a list of pages. 
+ * Frees a number of pages from the PCP lists
  * Assumes all pages on the list are in the same zone and of order 0.
  * count is the number of pages to free.
  *
  * Also clears the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static void free_pages_bulk(struct zone *zone, int count,
-                                       struct list_head *list, int order)
+static void free_pcppages_bulk(struct zone *zone, int count,
+                                       struct per_cpu_pages *pcp)
 {
+       int migratetype = 0;
+
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        while (count--) {
                struct page *page;
+               struct list_head *list;
+
+               /*
+                * Remove pages from lists in a round-robin fashion. This spinning
+                * around potentially empty lists is bloody awful; alternatives that
+                * don't suck are welcome. Callers never pass a count greater than
+                * pcp->count, so at least one list is non-empty each iteration.
+                */
+               do {
+                       if (++migratetype == MIGRATE_PCPTYPES)
+                               migratetype = 0;
+                       list = &pcp->lists[migratetype];
+               } while (list_empty(list));
 
-               VM_BUG_ON(list_empty(list));
                page = list_entry(list->prev, struct page, lru);
                /* have to delete it, as __free_one_page manipulates the list */
                list_del(&page->lru);
-               trace_mm_page_pcpu_drain(page, order, page_private(page));
-               __free_one_page(page, zone, order, page_private(page));
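+               /*
+                * pcp pages are always order-0; the round-robin list index,
+                * not page_private, supplies the migratetype here
+                */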
+               trace_mm_page_pcpu_drain(page, 0, migratetype);
+               __free_one_page(page, zone, 0, migratetype);
        }
        spin_unlock(&zone->lock);
 }
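
  Note: the per_cpu_pages structure change itself is not shown in these
  hunks. As a rough sketch, after this patch the structure presumably
  looks like the following, with the lists array replacing the old
  single list:

	struct per_cpu_pages {
		int count;		/* number of pages in the lists */
		int high;		/* high watermark, emptying needed */
		int batch;		/* chunk size for buddy add/remove */

		/* Lists of pages, one per migrate type stored on the pcp-lists */
		struct list_head lists[MIGRATE_PCPTYPES];
	};
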
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
-       free_pages_bulk(zone, to_drain, &pcp->list, 0);
+       free_pcppages_bulk(zone, to_drain, pcp);
        pcp->count -= to_drain;
        local_irq_restore(flags);
 }
 
                pcp = &pset->pcp;
                local_irq_save(flags);
-               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+               free_pcppages_bulk(zone, pcp->count, pcp);
                pcp->count = 0;
                local_irq_restore(flags);
        }
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
+       int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);
 
        kmemcheck_free_shadow(page, 0);
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp;
-       set_page_private(page, get_pageblock_migratetype(page));
+       migratetype = get_pageblock_migratetype(page);
+       set_page_private(page, migratetype);
        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_event(PGFREE);
 
+       /*
+        * We only track unmovable, reclaimable and movable on pcp lists.
+        * Free ISOLATE pages back to the allocator because they are being
+        * offlined, but treat RESERVE as movable pages so we can get those
+        * areas back if necessary. Otherwise, we may have to free pages
+        * excessively into the page allocator.
+        */
+       if (migratetype >= MIGRATE_PCPTYPES) {
+               if (unlikely(migratetype == MIGRATE_ISOLATE)) {
+                       free_one_page(zone, page, 0, migratetype);
+                       goto out;
+               }
+               migratetype = MIGRATE_MOVABLE;
+       }
+
        if (cold)
-               list_add_tail(&page->lru, &pcp->list);
+               list_add_tail(&page->lru, &pcp->lists[migratetype]);
        else
-               list_add(&page->lru, &pcp->list);
+               list_add(&page->lru, &pcp->lists[migratetype]);
        pcp->count++;
        if (pcp->count >= pcp->high) {
-               free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+               free_pcppages_bulk(zone, pcp->batch, pcp);
                pcp->count -= pcp->batch;
        }
+
+out:
        local_irq_restore(flags);
        put_cpu();
 }
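
  Note: the migratetype values are defined outside these hunks; the
  migratetype >= MIGRATE_PCPTYPES test above presumably relies on an
  ordering along these lines in include/linux/mmzone.h:

	#define MIGRATE_UNMOVABLE     0
	#define MIGRATE_RECLAIMABLE   1
	#define MIGRATE_MOVABLE       2
	#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
	#define MIGRATE_RESERVE       3
	#define MIGRATE_ISOLATE       4 /* can't allocate from here */
	#define MIGRATE_TYPES         5

  Only RESERVE and ISOLATE compare >= MIGRATE_PCPTYPES, which is exactly
  the pair given special treatment in free_hot_cold_page() above.
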
        cpu  = get_cpu();
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
+               struct list_head *list;
 
                pcp = &zone_pcp(zone, cpu)->pcp;
+               list = &pcp->lists[migratetype];
                local_irq_save(flags);
-               if (!pcp->count) {
-                       pcp->count = rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list,
-                                       migratetype, cold);
-                       if (unlikely(!pcp->count))
-                               goto failed;
-               }
-
-               /* Find a page of the appropriate migrate type */
-               if (cold) {
-                       list_for_each_entry_reverse(page, &pcp->list, lru)
-                               if (page_private(page) == migratetype)
-                                       break;
-               } else {
-                       list_for_each_entry(page, &pcp->list, lru)
-                               if (page_private(page) == migratetype)
-                                       break;
-               }
-
-               /* Allocate more to the pcp list if necessary */
-               if (unlikely(&page->lru == &pcp->list)) {
-                       int get_one_page = 0;
-
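+               /*
+                * Each pcp list holds a single migratetype: refill the list
+                * when it is empty, then take a cold page from the tail or a
+                * hot page from the head. No linear search is needed anymore.
+                */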
+               if (list_empty(list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list,
+                                       pcp->batch, list,
                                        migratetype, cold);
-                       list_for_each_entry(page, &pcp->list, lru) {
-                               if (get_pageblock_migratetype(page) !=
-                                           MIGRATE_ISOLATE) {
-                                       get_one_page = 1;
-                                       break;
-                               }
-                       }
-                       if (!get_one_page)
+                       if (unlikely(list_empty(list)))
                                goto failed;
                }
 
+               if (cold)
+                       page = list_entry(list->prev, struct page, lru);
+               else
+                       page = list_entry(list->next, struct page, lru);
+
                list_del(&page->lru);
                pcp->count--;
        } else {
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
        struct per_cpu_pages *pcp;
+       int migratetype;
 
        memset(p, 0, sizeof(*p));
 
        pcp->count = 0;
        pcp->high = 6 * batch;
        pcp->batch = max(1UL, 1 * batch);
-       INIT_LIST_HEAD(&pcp->list);
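+       /* Initialise one empty list per migratetype kept on the pcp lists */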
+       for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
+               INIT_LIST_HEAD(&pcp->lists[migratetype]);
 }
 
 /*
                pcp = &pset->pcp;
 
                local_irq_save(flags);
-               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+               free_pcppages_bulk(zone, pcp->count, pcp);
                setup_pageset(pset, batch);
                local_irq_restore(flags);
        }