}
 
 /*
- * The generic version pmdp_get_and_clear uses a version of pmd_clear() with a
+ * The generic version of pmdp_huge_get_and_clear uses a version of pmd_clear() with a
  * different prototype.
  */
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-                                      unsigned long address, pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                           unsigned long address, pmd_t *pmdp)
 {
        pmd_t old = *pmdp;
 
 
 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-extern pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-                               unsigned long addr, pmd_t *pmdp);
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                    unsigned long addr, pmd_t *pmdp);
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
 static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
        return;
 }
 
-pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-                        unsigned long addr, pmd_t *pmdp)
+pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                             unsigned long addr, pmd_t *pmdp)
 {
        pmd_t old_pmd;
        pgtable_t pgtable;
 
        return pmd_young(pmd);
 }
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-                                      unsigned long address, pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                           unsigned long address, pmd_t *pmdp)
 {
        pmd_t pmd = *pmdp;
 
        return pmd;
 }
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
-static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
-                                           unsigned long address,
-                                           pmd_t *pmdp, int full)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
+                                                unsigned long address,
+                                                pmd_t *pmdp, int full)
 {
        pmd_t pmd = *pmdp;
 
        return pmd;
 }
 
-#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
-static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
-                                    unsigned long address, pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+                                         unsigned long address, pmd_t *pmdp)
 {
-       return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+       return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 }
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
                                        unsigned long address,
                                        pmd_t *pmdp)
 {
-       return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+       return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 }
 #define pmdp_collapse_flush pmdp_collapse_flush
 
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig, int fullmm);
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-                                      unsigned long addr,
-                                      pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr,
+                                           pmd_t *pmdp)
 {
        pmd_t pmd = *pmdp;
        set_pmd_at(mm, addr, pmdp, __pmd(0UL));
 
 }
 
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-                                      unsigned long address,
-                                      pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                           unsigned long address,
+                                           pmd_t *pmdp)
 {
        return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
 }
 
        return pmd_flags(pmd) & _PAGE_RW;
 }
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pmd_t *pmdp)
 {
        pmd_t pmd = native_pmdp_get_and_clear(pmdp);
 
 }
 #endif
 
-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-                                      unsigned long address,
-                                      pmd_t *pmdp)
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                           unsigned long address,
+                                           pmd_t *pmdp)
 {
        pmd_t pmd = *pmdp;
        pmd_clear(pmdp);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long address, pmd_t *pmdp,
                                            int full)
 {
-       return pmdp_get_and_clear(mm, address, pmdp);
+       return pmdp_huge_get_and_clear(mm, address, pmdp);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
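
For reference, the __HAVE_ARCH_* guards above follow the usual asm-generic convention: an architecture that supplies its own huge-PMD helper defines the corresponding macro in its asm/pgtable.h, and the generic fallback here compiles out. A minimal sketch of such an override, modelled loosely on the sparc hunk earlier in this patch (the body is illustrative only, not any particular architecture's implementation):

/* arch/<arch>/include/asm/pgtable.h -- illustrative sketch only */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pmd_t *pmdp)
{
        pmd_t old = *pmdp;

        /* Arch-specific clear; going through set_pmd_at() lets the
         * architecture's TLB-batching code see the old value. */
        set_pmd_at(mm, addr, pmdp, __pmd(0UL));
        return old;
}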
                              pte_t *ptep);
 #endif
 
-#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
-extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
                              unsigned long address,
                              pmd_t *pmdp);
 #endif
 
        ___pte;                                                         \
 })
 
-#define pmdp_clear_flush_notify(__vma, __haddr, __pmd)                 \
+#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)            \
 ({                                                                     \
        unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
        struct mm_struct *___mm = (__vma)->vm_mm;                       \
        pmd_t ___pmd;                                                   \
                                                                        \
-       ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd);               \
+       ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);          \
        mmu_notifier_invalidate_range(___mm, ___haddr,                  \
                                      ___haddr + HPAGE_PMD_SIZE);       \
                                                                        \
        ___pmd;                                                         \
 })
 
-#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd)                        \
+#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)           \
 ({                                                                     \
        unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
        pmd_t ___pmd;                                                   \
                                                                        \
-       ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd);              \
+       ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd);         \
        mmu_notifier_invalidate_range(__mm, ___haddr,                   \
                                      ___haddr + HPAGE_PMD_SIZE);       \
                                                                        \
 #define ptep_clear_flush_young_notify ptep_clear_flush_young
 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
 #define        ptep_clear_flush_notify ptep_clear_flush
-#define pmdp_clear_flush_notify pmdp_clear_flush
-#define pmdp_get_and_clear_notify pmdp_get_and_clear
+#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
+#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
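
The renamed *_notify wrappers keep their existing contract: clear the huge PMD (flushing the TLB in the clear_flush variant), then call mmu_notifier_invalidate_range() over [___haddr, ___haddr + HPAGE_PMD_SIZE). A minimal caller sketch under the usual THP conventions (pmd lock held, haddr already masked with HPAGE_PMD_MASK); the helper name is hypothetical:

/* Hypothetical helper, for illustration only. */
static pmd_t clear_one_huge_pmd(struct vm_area_struct *vma,
                                unsigned long haddr, pmd_t *pmd)
{
        pmd_t old;

        /* Caller holds the pmd lock and haddr is PMD-aligned. */
        old = pmdp_huge_clear_flush_notify(vma, haddr, pmd);

        /* 'old' is the previous huge pmd; the primary TLB and any
         * secondary MMUs (via mmu_notifier) have been invalidated for
         * [haddr, haddr + HPAGE_PMD_SIZE). */
        return old;
}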
 
                goto out_free_pages;
        VM_BUG_ON_PAGE(!PageHead(page), page);
 
-       pmdp_clear_flush_notify(vma, haddr, pmd);
+       pmdp_huge_clear_flush_notify(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
                pmd_t entry;
                entry = mk_huge_pmd(new_page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               pmdp_clear_flush_notify(vma, haddr, pmd);
+               pmdp_huge_clear_flush_notify(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                mem_cgroup_commit_charge(new_page, memcg, false);
                lru_cache_add_active_or_unevictable(new_page, vma);
                pmd_t orig_pmd;
                /*
                 * For architectures like ppc64 we look at deposited pgtable
-                * when calling pmdp_get_and_clear. So do the
+                * when calling pmdp_huge_get_and_clear. So do the
                 * pgtable_trans_huge_withdraw after finishing pmdp related
                 * operations.
                 */
-               orig_pmd = pmdp_get_and_clear_full(tlb->mm, addr, pmd,
-                                                  tlb->fullmm);
+               orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
+                                                       tlb->fullmm);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
                pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
                if (is_huge_zero_pmd(orig_pmd)) {
                new_ptl = pmd_lockptr(mm, new_pmd);
                if (new_ptl != old_ptl)
                        spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
-               pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+               pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
                VM_BUG_ON(!pmd_none(*new_pmd));
 
                if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
                }
 
                if (!prot_numa || !pmd_protnone(*pmd)) {
-                       entry = pmdp_get_and_clear_notify(mm, addr, pmd);
+                       entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
                        entry = pmd_modify(entry, newprot);
                        if (preserve_write)
                                entry = pmd_mkwrite(entry);
        pmd_t _pmd;
        int i;
 
-       pmdp_clear_flush_notify(vma, haddr, pmd);
+       pmdp_huge_clear_flush_notify(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 
         */
        flush_cache_range(vma, mmun_start, mmun_end);
        page_add_anon_rmap(new_page, vma, mmun_start);
-       pmdp_clear_flush_notify(vma, mmun_start, pmd);
+       pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
        set_pmd_at(mm, mmun_start, pmd, entry);
        flush_tlb_range(vma, mmun_start, mmun_end);
        update_mmu_cache_pmd(vma, address, &entry);
 
 }
 #endif
 
-#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
-                      pmd_t *pmdp)
+pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
+                           pmd_t *pmdp)
 {
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(!pmd_trans_huge(*pmdp));
-       pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+       pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
 }
 pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
 {
+       /*
+        * The pmd and hugepage pte formats are the same, so we can
+        * use the same function.
+        */
        pmd_t pmd;
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
-       pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+       pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
 }
 
 
        pmd = pmd_offset(pud, address);
        /*
-        * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
+        * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
         * without holding anon_vma lock for write.  So when looking for a
         * genuine pmde (in which to find pte), test present and !THP together.
         */