unsigned long end, struct mm_walk *walk)
 {
        struct vm_area_struct *vma = walk->vma;
-       split_huge_page_pmd(vma, addr, pmd);
+       split_huge_pmd(vma, pmd, addr);
        return 0;
 }
 
 
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, 0xA0000);
-       split_huge_page_pmd_mm(mm, 0xA0000, pmd);
+
+       if (pmd_trans_huge(*pmd)) {
+               struct vm_area_struct *vma = find_vma(mm, 0xA0000);
+               split_huge_pmd(vma, pmd, 0xA0000);
+       }
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
 
 }
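
With split_huge_page_pmd_mm() removed, a caller that starts from a bare
mm_struct must look up the vma itself before splitting, as mark_screen_rdonly()
now does above. The same pattern as a stand-alone sketch (split_pmd_in_mm() is
a hypothetical helper, not part of this patch):

        /* Sketch: split one pmd given only the mm. Assumes mmap_sem is held
         * and that a huge pmd implies a vma covering the address. */
        static void split_pmd_in_mm(struct mm_struct *mm, unsigned long addr,
                                    pmd_t *pmd)
        {
                if (pmd_trans_huge(*pmd)) {
                        struct vm_area_struct *vma = find_vma(mm, addr);

                        if (vma)
                                split_huge_pmd(vma, pmd, addr);
                }
        }
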
 extern void __split_huge_page_pmd(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd);
-#define split_huge_page_pmd(__vma, __address, __pmd)                   \
+#define split_huge_pmd(__vma, __pmd, __address)                                \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (unlikely(pmd_trans_huge(*____pmd)))                 \
                        __split_huge_page_pmd(__vma, __address,         \
                                        ____pmd);                       \
        } while (0)
-extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
-               pmd_t *pmd);
 #if HPAGE_PMD_ORDER >= MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
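
Because the macro itself tests pmd_trans_huge(), callers may invoke it
unconditionally on any populated pmd; only a genuinely huge entry reaches
__split_huge_page_pmd(). A call such as split_huge_pmd(vma, pmd, addr)
expands, roughly, to:

        do {
                pmd_t *____pmd = (pmd);
                if (unlikely(pmd_trans_huge(*____pmd)))
                        __split_huge_page_pmd(vma, addr, ____pmd);
        } while (0);
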
 {
        return 0;
 }
-#define split_huge_page_pmd(__vma, __address, __pmd)   \
-       do { } while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)        \
        do { } while (0)
-#define split_huge_page_pmd_mm(__mm, __address, __pmd) \
+#define split_huge_pmd(__vma, __pmd, __address)        \
        do { } while (0)
 static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
 
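With CONFIG_TRANSPARENT_HUGEPAGE disabled, the stubs above expand to empty
statements, so call sites need no #ifdef guards. The idiom used at several
call sites below then reduces to the sketch here (assuming pmd_trans_unstable()
is likewise a constant 0 without THP, as in the asm-generic pgtable.h of this
era):

        split_huge_pmd(vma, pmd, addr);         /* do { } while (0) */
        if (pmd_trans_unstable(pmd))            /* constant 0 */
                return 0;
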
                if (is_huge_zero_page(page)) {
                        spin_unlock(ptl);
                        ret = 0;
-                       split_huge_page_pmd(vma, address, pmd);
+                       split_huge_pmd(vma, pmd, address);
                } else {
                        get_page(page);
                        spin_unlock(ptl);
 
 
        if (unlikely(!new_page)) {
                if (!page) {
-                       split_huge_page_pmd(vma, address, pmd);
+                       split_huge_pmd(vma, pmd, address);
                        ret |= VM_FAULT_FALLBACK;
                } else {
                        ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                        pmd, orig_pmd, page, haddr);
                        if (ret & VM_FAULT_OOM) {
-                               split_huge_page(page);
+                               split_huge_pmd(vma, pmd, address);
                                ret |= VM_FAULT_FALLBACK;
                        }
                        put_user_huge_page(page);
                                           true))) {
                put_page(new_page);
                if (page) {
-                       split_huge_page(page);
+                       split_huge_pmd(vma, pmd, address);
                        put_user_huge_page(page);
                } else
-                       split_huge_page_pmd(vma, address, pmd);
+                       split_huge_pmd(vma, pmd, address);
                ret |= VM_FAULT_FALLBACK;
                count_vm_event(THP_FAULT_FALLBACK);
                goto out;
                goto again;
 }
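
Each failure path above splits the huge pmd in place and returns
VM_FAULT_FALLBACK so the fault is retried at pte granularity; where the old
code called split_huge_page(page) directly, the patch routes every fallback
through the same split_huge_pmd() helper. Condensed, the idiom is:

        /* Sketch: stop servicing the fault with a huge page, split the
         * mapping, and let the fault handler retry at pte level. */
        split_huge_pmd(vma, pmd, address);
        ret |= VM_FAULT_FALLBACK;
        count_vm_event(THP_FAULT_FALLBACK);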
 
-void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
-               pmd_t *pmd)
-{
-       struct vm_area_struct *vma;
-
-       vma = find_vma(mm, address);
-       BUG_ON(vma == NULL);
-       split_huge_page_pmd(vma, address, pmd);
-}
-
-static void split_huge_page_address(struct mm_struct *mm,
+static void split_huge_pmd_address(struct vm_area_struct *vma,
                                    unsigned long address)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
 
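        /* Callers pass only boundaries that are not HPAGE_PMD aligned: an
         * aligned boundary never cuts through a huge page, so there is
         * nothing to split. */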
        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-       pgd = pgd_offset(mm, address);
+       pgd = pgd_offset(vma->vm_mm, address);
        if (!pgd_present(*pgd))
                return;
 
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return;
 
        pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
+       if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
                return;
        /*
         * Caller holds the mmap_sem write mode, so a huge pmd cannot
         * materialize from under us.
         */
-       split_huge_page_pmd_mm(mm, address, pmd);
+       __split_huge_page_pmd(vma, address, pmd);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
        if (start & ~HPAGE_PMD_MASK &&
            (start & HPAGE_PMD_MASK) >= vma->vm_start &&
            (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
-               split_huge_page_address(vma->vm_mm, start);
+               split_huge_pmd_address(vma, start);
 
        /*
         * If the new end address isn't hpage aligned and it could
        if (end & ~HPAGE_PMD_MASK &&
            (end & HPAGE_PMD_MASK) >= vma->vm_start &&
            (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
-               split_huge_page_address(vma->vm_mm, end);
+               split_huge_pmd_address(vma, end);
 
        /*
         * If we're also updating the vma->vm_next->vm_start, if the new
                if (nstart & ~HPAGE_PMD_MASK &&
                    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
                    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
-                       split_huge_page_address(next->vm_mm, nstart);
+                       split_huge_pmd_address(next, nstart);
        }
 }
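
Each of the three checks above rounds the unaligned boundary down to its
containing PMD page and splits only when that page lies wholly inside one
vma. Concretely, assuming 2MB huge pages (HPAGE_PMD_SIZE == 0x200000, as on
x86-64):

        unsigned long start = 0x00501000; /* start & ~HPAGE_PMD_MASK = 0x101000, unaligned */
        unsigned long huge  = start & HPAGE_PMD_MASK;  /* 0x00400000 */
        /* split only when vma->vm_start <= 0x00400000 and
         * vma->vm_end >= 0x00400000 + HPAGE_PMD_SIZE (== 0x00600000) */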
 
                                        BUG();
                                }
 #endif
-                               split_huge_page_pmd(vma, addr, pmd);
+                               split_huge_pmd(vma, pmd, addr);
                        } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
                        /* fall through */
 
        pte_t *pte;
        spinlock_t *ptl;
 
-       split_huge_page_pmd(vma, addr, pmd);
+       split_huge_pmd(vma, pmd, addr);
        if (pmd_trans_unstable(pmd))
                return 0;
 
 
 
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
-                               split_huge_page_pmd(vma, addr, pmd);
+                               split_huge_pmd(vma, pmd, addr);
                        else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
                                                newprot, prot_numa);
 
                                need_flush = true;
                                continue;
                        } else if (!err) {
-                               split_huge_page_pmd(vma, old_addr, old_pmd);
+                               split_huge_pmd(vma, old_pmd, old_addr);
                        }
                        VM_BUG_ON(pmd_trans_huge(*old_pmd));
                }
 
                if (!walk->pte_entry)
                        continue;
 
-               split_huge_page_pmd_mm(walk->mm, addr, pmd);
+               split_huge_pmd(walk->vma, pmd, addr);
                if (pmd_trans_unstable(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);