static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long haddr, pmd_t *pmd,
-                                       struct page *page)
+                                       struct page *page, gfp_t gfp)
 {
        struct mem_cgroup *memcg;
        pgtable_t pgtable;
 
        VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-       if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg))
+       if (mem_cgroup_try_charge(page, mm, gfp, &memcg))
                return VM_FAULT_OOM;
 
        pgtable = pte_alloc_one(mm, haddr);

[...]

                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
-       if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
+       if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) {
                put_page(page);
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
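
The reason gfp has to be threaded through: the allocation mask is built by
alloc_hugepage_gfpmask(), which drops __GFP_WAIT when defrag is disabled,
while the memcg charge used a hardcoded GFP_TRANSHUGE and could therefore
block in reclaim even when the page allocation itself was not allowed to.
For reference, the helper was defined in mm/huge_memory.c along these lines
at the time (quoted for context, not part of this patch):

        /* defrag disabled => __GFP_WAIT cleared, the allocation never blocks */
        static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
        {
                return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
        }

Charging with the same mask makes the memcg side exactly as aggressive as
the allocation, no more. The same change repeats in do_huge_pmd_wp_page(),
whose locals gain the mask:
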
        unsigned long haddr;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
+       gfp_t huge_gfp;                 /* for allocation and charge */
 
        ptl = pmd_lockptr(mm, pmd);
        VM_BUG_ON_VMA(!vma->anon_vma, vma);

[...]

 alloc:
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow()) {
-               gfp_t gfp;
-
-               gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
-               new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+               huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+               new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
        } else
                new_page = NULL;
 
[...]

                goto out;
        }
 
-       if (unlikely(mem_cgroup_try_charge(new_page, mm,
-                                          GFP_TRANSHUGE, &memcg))) {
+       if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) {
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
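
Here the mask moves from block scope to function scope: huge_gfp is
computed at the allocation site under the alloc: label but is needed again
further down, where the new page is charged. Condensed, the flow after this
patch is roughly (a sketch assembled from the hunks above, not the literal
kernel code):

        huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
        new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
        /* ... */
        if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) {
                put_page(new_page);     /* undo the allocation */
                /* ... fall back to splitting the huge page */
        }

On to the khugepaged side:
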
        return true;
 }
 
-static struct page
-*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+static struct page *
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
                       struct vm_area_struct *vma, unsigned long address,
                       int node)
 {
-       gfp_t flags;
-
        VM_BUG_ON_PAGE(*hpage, *hpage);
 
-       /* Only allocate from the target node */
-       flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
-               __GFP_THISNODE;
-
        /*
         * Before allocating the hugepage, release the mmap_sem read lock.
         * The allocation can take potentially a long time if it involves
         * sync compaction, and we do not need to hold the mmap_sem during
         * that. We will recheck the vma after taking it again in write mode.
         */
        up_read(&mm->mmap_sem);
 
-       *hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER);
+       *hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
        if (unlikely(!*hpage)) {
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                *hpage = ERR_PTR(-ENOMEM);
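
The flags computation that used to live in the NUMA khugepaged_alloc_page()
moves out to its caller, collapse_huge_page(), so that this allocation and
the memcg charge that follows it see the identical mask. Worked out,
assuming the alloc_hugepage_gfpmask() helper quoted earlier:

        /*
         * khugepaged defrag enabled:
         *   gfp = GFP_TRANSHUGE | __GFP_OTHER_NODE | __GFP_THISNODE
         * khugepaged defrag disabled: the same, minus __GFP_WAIT
         */

__GFP_THISNODE pins the allocation to the node picked by the scan, and
__GFP_OTHER_NODE only tweaks NUMA vmstat accounting (khugepaged allocates
on behalf of another process), so neither bit changes how hard the charge
may push reclaim. The !CONFIG_NUMA stub gets the same signature change:
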
        return true;
 }
 
-static struct page
-*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+static struct page *
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
                       struct vm_area_struct *vma, unsigned long address,
                       int node)
 {
        up_read(&mm->mmap_sem);
        VM_BUG_ON(!*hpage);
+
        return  *hpage;
 }
 #endif
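
Note that this variant takes the new gfp argument purely for signature
parity: with !CONFIG_NUMA the hugepage was preallocated by khugepaged
(khugepaged_prealloc_page()), so nothing is allocated here and the
parameter is unused. Finally, collapse_huge_page() itself:
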
        struct mem_cgroup *memcg;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
+       gfp_t gfp;
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
+       /* Only allocate from the target node */
+       gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
+               __GFP_THISNODE;
+
        /* release the mmap_sem read lock. */
-       new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+       new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
        if (!new_page)
                return;
 
        if (unlikely(mem_cgroup_try_charge(new_page, mm,
-                                          GFP_TRANSHUGE, &memcg)))
+                                          gfp, &memcg)))
                return;
 
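With that, all three THP charge sites (the anonymous fault, the COW path
and khugepaged's collapse) charge the memcg with the same gfp mask that is
used for the huge page allocation itself, so the charge can never block
harder than the allocation it accompanies.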