unsigned int order, unsigned int nr_pages, struct page **pages)
 {
        unsigned int nr_allocated = 0;
-       gfp_t alloc_gfp = gfp;
-       bool nofail = gfp & __GFP_NOFAIL;
        struct page *page;
        int i;
 
         * more permissive.
         */
        if (!order) {
-               /* bulk allocator doesn't support nofail req. officially */
-               gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
-
                while (nr_allocated < nr_pages) {
                        unsigned int nr, nr_pages_request;
 
                         * but mempolicy wants to alloc memory by interleaving.
                         */
                        if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
-                               nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
+                               nr = alloc_pages_bulk_array_mempolicy_noprof(gfp,
                                                        nr_pages_request,
                                                        pages + nr_allocated);
-
                        else
-                               nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
+                               nr = alloc_pages_bulk_array_node_noprof(gfp, nid,
                                                        nr_pages_request,
                                                        pages + nr_allocated);
 
                        if (nr != nr_pages_request)
                                break;
                }
-       } else if (gfp & __GFP_NOFAIL) {
-               /*
-                * Higher order nofail allocations are really expensive and
-                * potentially dangerous (pre-mature OOM, disruptive reclaim
-                * and compaction etc.
-                */
-               alloc_gfp &= ~__GFP_NOFAIL;
        }
 
        /* High-order pages or fallback path if "bulk" fails. */
        while (nr_allocated < nr_pages) {
-               if (!nofail && fatal_signal_pending(current))
+               if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
                        break;
 
                if (nid == NUMA_NO_NODE)
-                       page = alloc_pages_noprof(alloc_gfp, order);
+                       page = alloc_pages_noprof(gfp, order);
                else
-                       page = alloc_pages_node_noprof(nid, alloc_gfp, order);
+                       page = alloc_pages_node_noprof(nid, gfp, order);
+
                if (unlikely(!page))
                        break;
 
                /*
                 * Higher order allocations must be able to be treated as
-                * indepdenent small pages by callers (as they can with
+                * independent small pages by callers (as they can with
                 * small-page vmallocs). Some drivers do their own refcounting
                 * on vmalloc_to_page() pages, some use page->mapping,
                 * page->lru, etc.
        set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
        page_order = vm_area_page_order(area);
 
-       area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
+       /*
+        * Higher order nofail allocations are really expensive and
+        * potentially dangerous (premature OOM, disruptive reclaim
+        * and compaction etc.).
+        *
+        * Please note, the __vmalloc_node_range_noprof() falls back
+        * to order-0 pages if the high-order attempt is unsuccessful.
+        */
+       area->nr_pages = vm_area_alloc_pages((page_order ?
+               gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
                node, page_order, nr_small_pages, area->pages);
 
        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);