* potentially hurts the reliability of high-order allocations when under
  * intense memory pressure, but failed atomic allocations should be easier
  * to recover from than an OOM.
+ *
+ * If @force is true, try to unreserve a pageblock even if that would
+ * exhaust the zone's highatomic reserve.
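+ *
+ * Returns true if free pages were released from a highatomic pageblock,
+ * false otherwise.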
  */
-static bool unreserve_highatomic_pageblock(const struct alloc_context *ac)
+static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+                                               bool force)
 {
        struct zonelist *zonelist = ac->zonelist;
        unsigned long flags;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
                                                                ac->nodemask) {
-               /* Preserve at least one pageblock */
-               if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
+               /*
+                * Preserve at least one pageblock unless memory pressure
+                * is really high.
+                */
+               if (!force && zone->nr_reserved_highatomic <=
+                                       pageblock_nr_pages)
                        continue;
 
                spin_lock_irqsave(&zone->lock, flags);
                         */
                        set_pageblock_migratetype(page, ac->migratetype);
                        ret = move_freepages_block(zone, page, ac->migratetype);
-                       spin_unlock_irqrestore(&zone->lock, flags);
-                       return ret;
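+                       /*
+                        * Only give up the search once pages have actually
+                        * been moved off this pageblock; otherwise keep
+                        * trying the remaining orders and zones.
+                        */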
+                       if (ret) {
+                               spin_unlock_irqrestore(&zone->lock, flags);
+                               return ret;
+                       }
                }
                spin_unlock_irqrestore(&zone->lock, flags);
        }
         * Shrink them and try again
         */
        if (!page && !drained) {
-               unreserve_highatomic_pageblock(ac);
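+               /*
+                * Keep the last reserved highatomic pageblock for now; the
+                * OOM path in should_reclaim_retry() unreserves it with
+                * force=true as a last resort.
+                */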
+               unreserve_highatomic_pageblock(ac, false);
                drain_all_pages(NULL);
                drained = true;
                goto retry;
         */
        if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
                /* Before OOM, exhaust the highatomic reserve */
-               return unreserve_highatomic_pageblock(ac);
+               return unreserve_highatomic_pageblock(ac, true);
        }
 
        /*