{
        struct zone *last_compact_zone = NULL;
        unsigned long compact_result;
-
+       struct page *page;
 
        if (!order)
                return NULL;
 
        current->flags |= PF_MEMALLOC;
        compact_result = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
                                                mode, contended_compaction,
                                                &last_compact_zone);
        current->flags &= ~PF_MEMALLOC;
 
-       if (compact_result > COMPACT_DEFERRED)
-               count_vm_event(COMPACTSTALL);
-       else
+       switch (compact_result) {
+       case COMPACT_DEFERRED:
                *deferred_compaction = true;
+               /* fall-through */
+       case COMPACT_SKIPPED:
+               return NULL;
+       default:
+               break;
+       }
 
-       if (compact_result > COMPACT_SKIPPED) {
-               struct page *page;
+       /*
+        * In at least one zone, compaction wasn't deferred or skipped, so
+        * count a compaction stall.
+        */
+       count_vm_event(COMPACTSTALL);
 
-               /* Page migration frees to the PCP lists but we want merging */
-               drain_pages(get_cpu());
-               put_cpu();
+       /* Page migration frees to the PCP lists but we want merging */
+       drain_pages(get_cpu());
+       put_cpu();
 
-               page = get_page_from_freelist(gfp_mask, nodemask,
-                               order, zonelist, high_zoneidx,
-                               alloc_flags & ~ALLOC_NO_WATERMARKS,
-                               preferred_zone, classzone_idx, migratetype);
+       page = get_page_from_freelist(gfp_mask, nodemask,
+                       order, zonelist, high_zoneidx,
+                       alloc_flags & ~ALLOC_NO_WATERMARKS,
+                       preferred_zone, classzone_idx, migratetype);
 
-               if (page) {
-                       struct zone *zone = page_zone(page);
+       if (page) {
+               struct zone *zone = page_zone(page);
 
-                       zone->compact_blockskip_flush = false;
-                       compaction_defer_reset(zone, order, true);
-                       count_vm_event(COMPACTSUCCESS);
-                       return page;
-               }
+               zone->compact_blockskip_flush = false;
+               compaction_defer_reset(zone, order, true);
+               count_vm_event(COMPACTSUCCESS);
+               return page;
+       }
 
-               /*
-                * last_compact_zone is where try_to_compact_pages thought
-                * allocation should succeed, so it did not defer compaction.
-                * But now we know that it didn't succeed, so we do the defer.
-                */
-               if (last_compact_zone && mode != MIGRATE_ASYNC)
-                       defer_compaction(last_compact_zone, order);
+       /*
+        * last_compact_zone is where try_to_compact_pages thought allocation
+        * should succeed, so it did not defer compaction. But now we know it
+        * didn't succeed, so defer compaction for that zone here.
+        */
+       if (last_compact_zone && mode != MIGRATE_ASYNC)
+               defer_compaction(last_compact_zone, order);
 
-               /*
-                * It's bad if compaction run occurs and fails.
-                * The most likely reason is that pages exist,
-                * but not enough to satisfy watermarks.
-                */
-               count_vm_event(COMPACTFAIL);
+       /*
+        * It's bad if a compaction run occurs and fails. The most likely
+        * reason is that pages exist, but not enough to satisfy watermarks.
+        */
+       count_vm_event(COMPACTFAIL);
 
-               cond_resched();
-       }
+       cond_resched();
 
        return NULL;
 }