* allocation success. 1 << compact_defer_shift compactions are skipped up
  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
  */
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
        zone->compact_considered = 0;
        zone->compact_defer_shift++;
 
+       if (order < zone->compact_order_failed)
+               zone->compact_order_failed = order;
+
        if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
                zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 }
 
 /* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
        unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 
+       if (order < zone->compact_order_failed)
+               return false;
+
        /* Avoid possible overflow */
        if (++zone->compact_considered > defer_limit)
                zone->compact_considered = defer_limit;
        return zone->compact_considered < defer_limit;
 }
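
To make the new gating concrete, here is a minimal userspace sketch of the two helpers above (not kernel code: the zone struct is stripped down, and the initial compact_order_failed of 10 is an arbitrary "nothing has failed yet" stand-in):

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6      /* skip at most 1 << 6 == 64 attempts */

struct zone {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
};

/* As in defer_compaction() above: reset the counter, widen the skip
 * window exponentially, and remember the lowest order that failed. */
static void defer_compaction(struct zone *zone, int order)
{
        zone->compact_considered = 0;
        zone->compact_defer_shift++;

        if (order < zone->compact_order_failed)
                zone->compact_order_failed = order;

        if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
                zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* As in compaction_deferred() above: orders below the failed order are
 * never skipped; at or above it, skip until the window is used up. */
static bool compaction_deferred(struct zone *zone, int order)
{
        unsigned long defer_limit = 1UL << zone->compact_defer_shift;

        if (order < zone->compact_order_failed)
                return false;

        if (++zone->compact_considered > defer_limit)
                zone->compact_considered = defer_limit;

        return zone->compact_considered < defer_limit;
}

int main(void)
{
        struct zone z = { 0, 0, 10 };

        defer_compaction(&z, 4);        /* sync compaction failed at order 4 */

        printf("%d\n", compaction_deferred(&z, 2)); /* 0: order 2 < failed order 4 */
        printf("%d\n", compaction_deferred(&z, 4)); /* 1: inside the 1 << 1 window */
        printf("%d\n", compaction_deferred(&z, 4)); /* 0: window exhausted, retry allowed */
        return 0;
}

A second failure at order 4 would double the window (defer_shift 2, so 1 << 2 considered slots), and so on up to 64; lower orders keep compacting throughout.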
 
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
 }
 
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
        return true;
 }
 
         */
        unsigned int            compact_considered;
        unsigned int            compact_defer_shift;
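+       /* lowest failed order; compaction_deferred() never defers below it */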
+       int                     compact_order_failed;
 #endif
 
        ZONE_PADDING(_pad1_)
 
                INIT_LIST_HEAD(&cc->freepages);
                INIT_LIST_HEAD(&cc->migratepages);
 
-               if (cc->order < 0 || !compaction_deferred(zone))
+               if (cc->order < 0 || !compaction_deferred(zone, cc->order))
                        compact_zone(zone, cc);
 
+               if (cc->order > 0) {
+                       int ok = zone_watermark_ok(zone, cc->order,
+                                               low_wmark_pages(zone), 0, 0);
+                       if (ok && cc->order > zone->compact_order_failed)
+                               zone->compact_order_failed = cc->order + 1;
+                       /* Currently async compaction is never deferred. */
+                       else if (!ok && cc->sync)
+                               defer_compaction(zone, cc->order);
+               }
+
                VM_BUG_ON(!list_empty(&cc->freepages));
                VM_BUG_ON(!list_empty(&cc->migratepages));
        }
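
The order bookkeeping in this hunk collapses to a small decision table. A rough model in a hypothetical helper, reusing the toy zone and defer_compaction() from the sketch above, with watermark_ok standing in for the zone_watermark_ok() result:

/*
 *   watermark_ok  cc->sync   effect
 *   ------------  --------   --------------------------------------------
 *   true          any        raise compact_order_failed to order + 1
 *                            (only when it was below order)
 *   false         true       defer_compaction(): start/widen the backoff
 *   false         false      nothing; async compaction is never deferred
 */
static void note_compact_outcome(struct zone *zone, int order,
                                 bool watermark_ok, bool sync)
{
        if (order <= 0)
                return;

        if (watermark_ok && order > zone->compact_order_failed)
                zone->compact_order_failed = order + 1;
        else if (!watermark_ok && sync)
                defer_compaction(zone, order);
}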
 
        if (!order)
                return NULL;
 
-       if (compaction_deferred(preferred_zone)) {
+       if (compaction_deferred(preferred_zone, order)) {
                *deferred_compaction = true;
                return NULL;
        }
                if (page) {
                        preferred_zone->compact_considered = 0;
                        preferred_zone->compact_defer_shift = 0;
+                       if (order >= preferred_zone->compact_order_failed)
+                               preferred_zone->compact_order_failed = order + 1;
                        count_vm_event(COMPACTSUCCESS);
                        return page;
                }
                 * defer if the failure was a sync compaction failure.
                 */
                if (sync_migration)
-                       defer_compaction(preferred_zone);
+                       defer_compaction(preferred_zone, order);
 
                cond_resched();
        }
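
On the direct-compaction side the same state machine runs: a successful allocation fully re-arms deferral (the if (page) branch above), while a sync failure backs off. A hypothetical walk-through, continuing the toy zone from the earlier sketch with an invented event sequence:

static void demo_direct_compaction(void)
{
        struct zone z = { 0, 0, 10 };

        defer_compaction(&z, 4);        /* sync failure: shift 1, order_failed 4 */

        /* later, an order-4 direct compaction succeeds; the if (page)
         * branch above does the equivalent of: */
        z.compact_considered = 0;
        z.compact_defer_shift = 0;
        if (4 >= z.compact_order_failed)
                z.compact_order_failed = 4 + 1; /* now 5: orders <= 4 re-enabled */
}

Note that the success path uses >= where the kswapd-side hunk uses >, so a success at exactly the failed order also bumps compact_order_failed.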
 
         * If compaction is deferred, reclaim up to a point where
         * compaction will have a chance of success when re-enabled
         */
-       if (compaction_deferred(zone))
+       if (compaction_deferred(zone, sc->order))
                return watermark_ok;
 
        /* If compaction is not ready to start, keep reclaiming */
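
(In the surrounding function, not shown in this excerpt, watermark_ok records whether free pages already meet the headroom compaction needs at sc->order; returning it here means that while compaction is deferred, reclaim stops only once that headroom exists, so compaction has a chance of succeeding when it is re-enabled.)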