}
 
 static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
-                                  gfp_t gfp_mask, struct list_head *list)
+                                  struct list_head *list)
 {
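+       /*
+        * __GFP_RETRY_MAYFAIL lets the allocator retry reclaim without ever
+        * invoking the OOM killer, and __GFP_THISNODE restricts the
+        * allocation to the node of the vmemmap range being restored.
+        */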
+       gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_THISNODE;
        unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
        int nid = page_to_nid((struct page *)start);
        struct page *page, *next;
  * @end:       end address of the vmemmap virtual address range that we want to
  *             remap.
  * @reuse:     reuse address.
- * @gfp_mask:  GFP flag for allocating vmemmap pages.
  *
  * Return: %0 on success, negative error code otherwise.
  */
 static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
-                              unsigned long reuse, gfp_t gfp_mask)
+                              unsigned long reuse)
 {
        LIST_HEAD(vmemmap_pages);
        struct vmemmap_remap_walk walk = {
        /* See the comment in vmemmap_remap_free(). */
        BUG_ON(start - reuse != PAGE_SIZE);
 
-       if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
+       if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
                return -ENOMEM;
 
        mmap_read_lock(&init_mm);
         * When a HugeTLB page is freed to the buddy allocator, previously
         * discarded vmemmap pages must be allocated and remapped.
         */
-       ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse,
-                                 GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
+       ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse);
        if (!ret) {
                ClearHPageVmemmapOptimized(head);
                static_branch_dec(&hugetlb_optimize_vmemmap_key);