#define _ASM_IA64_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 
 #define __ASM_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 
 
 #ifdef CONFIG_HUGETLB_PAGE
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 extern struct kmem_cache *hugepte_cache;
 
 
 #define huge_ptep_set_wrprotect(__mm, __addr, __ptep)                  \
 ({                                                                     \
        pte_t __pte = huge_ptep_get(__ptep);                            \
-       if (pte_write(__pte)) {                                         \
+       if (huge_pte_write(__pte)) {                                    \
                huge_ptep_invalidate(__mm, __addr, __ptep);             \
                set_huge_pte_at(__mm, __addr, __ptep,                   \
                                huge_pte_wrprotect(__pte));             \
        huge_ptep_invalidate(vma->vm_mm, address, ptep);
 }
 
+/*
+ * NOTE(review): on s390 a hugetlb "pte" is really a segment table entry
+ * (pmd format).  Each helper below converts the pte_t value to/from the
+ * pmd view before applying the corresponding pmd operation, so the
+ * generic hugetlb code can keep treating entries as pte_t.
+ */
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+       pte_t pte;
+       pmd_t pmd;
+
+       pmd = mk_pmd_phys(page_to_phys(page), pgprot);
+       pte_val(pte) = pmd_val(pmd);
+       return pte;
+}
+
+/* Test writability via the pmd view of the segment table entry. */
+static inline int huge_pte_write(pte_t pte)
+{
+       pmd_t pmd;
+
+       pmd_val(pmd) = pte_val(pte);
+       return pmd_write(pmd);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+       /* No dirty bit in the segment table entry. */
+       return 0;
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+       pmd_t pmd;
+
+       pmd_val(pmd) = pte_val(pte);
+       pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
+       return pte;
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+       /* No dirty bit in the segment table entry. */
+       return pte;
+}
+
+/* Apply a new protection, round-tripping through the pmd format. */
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+       pmd_t pmd;
+
+       pmd_val(pmd) = pte_val(pte);
+       pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
+       return pte;
+}
+
+/* The slot holds a segment table entry, so clear it as a pmd. */
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                 pte_t *ptep)
+{
+       pmd_clear((pmd_t *) ptep);
+}
+
 #endif /* _ASM_S390_HUGETLB_H */
 
 #define __S110 PAGE_RW
 #define __S111 PAGE_RW
 
+/*
+ * Segment entry (large page) protection definitions.
+ */
+#define SEGMENT_NONE   __pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO     __pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW     __pgprot(_HPAGE_TYPE_RW)
+
 static inline int mm_exclusive(struct mm_struct *mm)
 {
        return likely(mm == current->active_mm &&
 #ifdef CONFIG_HUGETLB_PAGE
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-       /*
-        * PROT_NONE needs to be remapped from the pte type to the ste type.
-        * The HW invalid bit is also different for pte and ste. The pte
-        * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
-        * bit, so we don't have to clear it.
-        */
-       if (pte_val(pte) & _PAGE_INVALID) {
-               if (pte_val(pte) & _PAGE_SWT)
-                       pte_val(pte) |= _HPAGE_TYPE_NONE;
-               pte_val(pte) |= _SEGMENT_ENTRY_INV;
-       }
-       /*
-        * Clear SW pte bits, there are no SW bits in a segment table entry.
-        */
-       pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
-                         _PAGE_SWR | _PAGE_SWW);
-       /*
-        * Also set the change-override bit because we don't need dirty bit
-        * tracking for hugetlbfs pages.
-        */
        pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
        return pte;
 }
        }
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-
-#define SEGMENT_NONE   __pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO     __pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW     __pgprot(_HPAGE_TYPE_RW)
-
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
-
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
-
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
-       return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
-}
-
-static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                             pmd_t *pmdp, pmd_t entry)
-{
-       if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
-               pmd_val(entry) |= _SEGMENT_ENTRY_CO;
-       *pmdp = entry;
-}
-
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
        /*
        return pmd;
 }
 
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
+/* Build a segment table entry for a physical address + protection. */
+static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 {
-       pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
-       return pmd;
+       pmd_t __pmd;
+       pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
+       return __pmd;
 }
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
        return pmd;
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+
+/* True while the huge pmd has its split bit set. */
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+       return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
+}
+
+/*
+ * Install a pmd entry; valid entries additionally get the
+ * change-override bit when the machine has EDAT1.
+ */
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                             pmd_t *pmdp, pmd_t entry)
+{
+       if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+               pmd_val(entry) |= _SEGMENT_ENTRY_CO;
+       *pmdp = entry;
+}
+
+/* Mark a segment table entry as a large page. */
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+       pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+       return pmd;
+}
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
        }
 }
 
-static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
-{
-       pmd_t __pmd;
-       pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
-       return __pmd;
-}
-
 #define pfn_pmd(pfn, pgprot)   mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
 #define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
 
 
        if (!ptep)
                return -ENOMEM;
 
-       pte = mk_pte(page, PAGE_RW);
+       pte_val(pte) = addr;
        for (i = 0; i < PTRS_PER_PTE; i++) {
                set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
                pte_val(pte) += PAGE_SIZE;
 
 
 #include <asm/cacheflush.h>
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 
 #define _ASM_SPARC64_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #define _ASM_TILE_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 
 #define _ASM_X86_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 
--- /dev/null
+#ifndef _ASM_GENERIC_HUGETLB_H
+#define _ASM_GENERIC_HUGETLB_H
+
+/*
+ * Default huge-pte helpers for architectures where a huge pte has the
+ * same format as a normal pte: each one simply forwards to the
+ * corresponding pte_* operation.  An architecture with a different
+ * huge pte layout (e.g. s390, where it is a segment table entry)
+ * provides its own versions instead of including this header.
+ */
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+       return mk_pte(page, pgprot);
+}
+
+static inline int huge_pte_write(pte_t pte)
+{
+       return pte_write(pte);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+       return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+       return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+       return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+       return pte_modify(pte, newprot);
+}
+
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                 pte_t *ptep)
+{
+       pte_clear(mm, addr, ptep);
+}
+
+#endif /* _ASM_GENERIC_HUGETLB_H */
 
        pte_t entry;
 
        if (writable) {
-               entry =
-                   pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+               entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
+                                        vma->vm_page_prot)));
        } else {
-               entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+               entry = huge_pte_wrprotect(mk_huge_pte(page,
+                                          vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);
 {
        pte_t entry;
 
-       entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+       entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
        if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                update_mmu_cache(vma, address, ptep);
 }
                 * HWPoisoned hugepage is already unmapped and dropped reference
                 */
                if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
-                       pte_clear(mm, address, ptep);
+                       huge_pte_clear(mm, address, ptep);
                        continue;
                }
 
 
                pte = huge_ptep_get_and_clear(mm, address, ptep);
                tlb_remove_tlb_entry(tlb, ptep, address);
-               if (pte_dirty(pte))
+               if (huge_pte_dirty(pte))
                        set_page_dirty(page);
 
                page_remove_rmap(page);
         * page now as it is used to determine if a reservation has been
         * consumed.
         */
-       if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
+       if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
                if (vma_needs_reservation(h, vma, address) < 0) {
                        ret = VM_FAULT_OOM;
                        goto out_mutex;
 
 
        if (flags & FAULT_FLAG_WRITE) {
-               if (!pte_write(entry)) {
+               if (!huge_pte_write(entry)) {
                        ret = hugetlb_cow(mm, vma, address, ptep, entry,
                                                        pagecache_page);
                        goto out_page_table_lock;
                }
-               entry = pte_mkdirty(entry);
+               entry = huge_pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
        if (huge_ptep_set_access_flags(vma, address, ptep, entry,
                 * directly from any kind of swap entries.
                 */
                if (absent || is_swap_pte(huge_ptep_get(pte)) ||
-                   ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
+                   ((flags & FOLL_WRITE) &&
+                     !huge_pte_write(huge_ptep_get(pte)))) {
                        int ret;
 
                        spin_unlock(&mm->page_table_lock);
                }
                if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
-                       pte = pte_mkhuge(pte_modify(pte, newprot));
+                       pte = pte_mkhuge(huge_pte_modify(pte, newprot));
                        pte = arch_make_huge_pte(pte, vma, NULL, 0);
                        set_huge_pte_at(mm, address, ptep, pte);
                        pages++;