return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(int nid, struct hstate *h)
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+               int nid, nodemask_t *nodemask)
 {
        unsigned int order = huge_page_order(h);
        unsigned long nr_pages = 1 << order;
        unsigned long ret, pfn, flags;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
-       gfp_t gfp_mask;
 
-       gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
        zonelist = node_zonelist(nid, gfp_mask);
-       for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), NULL) {
+       for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
                spin_lock_irqsave(&zone->lock, flags);
 
                pfn = ALIGN(zone->zone_start_pfn, nr_pages);
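
The refactor changes the contract: the caller now picks the gfp mask and node policy instead of having __GFP_THISNODE hardcoded and the nodemask fixed to NULL. Mirroring the alloc_fresh_huge_page() hunk further down, a call site now looks roughly like this (a sketch, not verbatim kernel code; node and nodes_allowed come from the caller's node iteration):

        gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
        struct page *page;

        /* nodes_allowed now bounds the zonelist walk inside the allocator */
        page = alloc_gigantic_page(h, gfp_mask, node, nodes_allowed);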
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 
-static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
-{
-       struct page *page;
-
-       page = alloc_gigantic_page(nid, h);
-       if (page) {
-               prep_compound_gigantic_page(page, huge_page_order(h));
-               prep_new_huge_page(h, page, nid);
-               put_page(page); /* free it into the hugepage allocator */
-       }
-
-       return page;
-}
-
-static int alloc_fresh_gigantic_page(struct hstate *h,
-                               nodemask_t *nodes_allowed)
-{
-       struct page *page = NULL;
-       int nr_nodes, node;
-
-       for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-               page = alloc_fresh_gigantic_page_node(h, node);
-               if (page)
-                       return 1;
-       }
-
-       return 0;
-}
-
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
 static inline bool gigantic_page_supported(void) { return false; }
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+               int nid, nodemask_t *nodemask) { return NULL; }
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
                                                unsigned int order) { }
-static inline int alloc_fresh_gigantic_page(struct hstate *h,
-                                       nodemask_t *nodes_allowed) { return 0; }
 #endif
 
 static void update_and_free_page(struct hstate *h, struct page *page)
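
The NULL-returning alloc_gigantic_page() stub added to the !CONFIG_ARCH_HAS_GIGANTIC_PAGE branch above is what lets the unified allocation path below compile on every configuration; if the gigantic branch is ever reached there, the allocation simply fails. A minimal illustration (hypothetical snippet, not part of the patch):

        if (hstate_is_gigantic(h))
                /* stub: always NULL without CONFIG_ARCH_HAS_GIGANTIC_PAGE */
                page = alloc_gigantic_page(h, gfp_mask, node, nodes_allowed);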
        gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 
        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-               page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
-                               node, nodes_allowed);
+               if (hstate_is_gigantic(h))
+                       page = alloc_gigantic_page(h, gfp_mask,
+                                       node, nodes_allowed);
+               else
+                       page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
+                                       node, nodes_allowed);
                if (page)
                        break;
        }

        if (!page)
                return 0;
 
+       if (hstate_is_gigantic(h))
+               prep_compound_gigantic_page(page, huge_page_order(h));
        prep_new_huge_page(h, page, page_to_nid(page));
        put_page(page); /* free it into the hugepage allocator */
 
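With both page types allocated through alloc_fresh_huge_page(), the final hunk drops the now-redundant branch from the allocation loop in set_max_huge_pages():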
                /* yield cpu to avoid soft lockup */
                cond_resched();
 
-               if (hstate_is_gigantic(h))
-                       ret = alloc_fresh_gigantic_page(h, nodes_allowed);
-               else
-                       ret = alloc_fresh_huge_page(h, nodes_allowed);
+               ret = alloc_fresh_huge_page(h, nodes_allowed);
                spin_lock(&hugetlb_lock);
                if (!ret)
                        goto out;
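
Put together, the hunks leave a single allocation routine. Reassembled from the context above, the post-patch alloc_fresh_huge_page() reads roughly as follows (a reconstruction for readability; mm/hugetlb.c is authoritative):

        static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
        {
                struct page *page = NULL;
                int nr_nodes, node;
                gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

                for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                        /* one branch point instead of two parallel allocators */
                        if (hstate_is_gigantic(h))
                                page = alloc_gigantic_page(h, gfp_mask,
                                                node, nodes_allowed);
                        else
                                page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
                                                node, nodes_allowed);
                        if (page)
                                break;
                }

                if (!page)
                        return 0;

                /* gigantic pages need compound setup before the common prep */
                if (hstate_is_gigantic(h))
                        prep_compound_gigantic_page(page, huge_page_order(h));
                prep_new_huge_page(h, page, page_to_nid(page));
                put_page(page); /* free it into the hugepage allocator */

                return 1;
        }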