};
 
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+                               unsigned long addr, int avoid_reserve);
 
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
 #else  /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
+#define alloc_huge_page_noerr(v, a, r) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_sizelog(s) NULL
 
        return page;
 }
 
+/*
+ * A wrapper around alloc_huge_page() which simply returns the page if
+ * allocation succeeds, and NULL otherwise. It is called from new_vma_page(),
+ * whose callers do not expect an ERR_PTR()-encoded return value.
+ */
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+                               unsigned long addr, int avoid_reserve)
+{
+       struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
+       if (IS_ERR(page))
+               page = NULL;
+       return page;
+}
+
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
        struct huge_bootmem_page *m;
 
                vma = vma->vm_next;
        }
 
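+       /*
+        * Hugepages cannot be allocated through alloc_page_vma() below, so
+        * use the hugetlb allocator via alloc_huge_page_noerr(), which
+        * returns NULL rather than an ERR_PTR() value on failure.
+        */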
+       if (PageHuge(page))
+               return alloc_huge_page_noerr(vma, address, 1);
        /*
         * if !vma, alloc_page_vma() will use task or system default policy
         */
                                        (unsigned long)vma,
                                        MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
                        if (nr_failed)
-                               putback_lru_pages(&pagelist);
+                               putback_movable_pages(&pagelist);
                }
 
                if (nr_failed && (flags & MPOL_MF_STRICT))