extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                                 pmd_t *pmd);
 
-/* Generic variants assume pgtable_t is struct page *, hence need for these */
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                                      pgtable_t pgtable);
-
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                unsigned long end);
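
Aside: the deposit/withdraw overrides dropped above were only needed while ARC's pgtable_t was something other than a struct page pointer. Once pgtable_t becomes struct page *, the generic helpers in mm/pgtable-generic.c apply; they chain the deposited PTE pages through page->lru under the pmd lock. A rough paraphrase from memory (see mm/pgtable-generic.c for the authoritative code):

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO: deposited PTE pages are chained via their struct page lru */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
}

That is why the old ARC-private copies (removed further down, in the tlb.c part of this change) had to cast pgtable to a struct list_head *: with pgtable_t being a bare table pointer there was no struct page to hang the list on.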
 
        set_pmd(pmd, __pmd((unsigned long)pte));
 }
 
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
 {
-       set_pmd(pmd, __pmd((unsigned long)pte));
-}
-
-static inline int __get_order_pgd(void)
-{
-       return get_order(PTRS_PER_PGD * sizeof(pgd_t));
+       set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
 }
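
With pgtable_t now a struct page *, pmd_populate() is handed the page returned by pte_alloc_one() and converts it to a kernel pointer via page_address(), while pmd_populate_kernel() keeps taking a pte_t * directly. A deliberately simplified, hypothetical caller (loosely modeled on __pte_alloc() in mm/memory.c; the function name and the coarse locking are illustrative only) shows how pmd_populate() pairs with the pte_alloc_one()/pte_free() definitions further down:

static int example_pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
        pgtable_t new = pte_alloc_one(mm);      /* struct page * on ARC now */

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
        if (pmd_none(*pmd)) {
                pmd_populate(mm, pmd, new);     /* uses page_address(new) */
                new = NULL;
        }
        spin_unlock(&mm->page_table_lock);

        if (new)                                /* lost the race, give the page back */
                pte_free(mm, new);
        return 0;
}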
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       int num, num2;
-       pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+       pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
 
        if (ret) {
+               int num, num2;
                num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
                memzero(ret, num * sizeof(pgd_t));
 
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-       free_pages((unsigned long)pgd, __get_order_pgd());
-}
-
-
-/*
- * With software-only page-tables, addr-split for traversal is tweakable and
- * that directly governs how big tables would be at each level.
- * Further, the MMU page size is configurable.
- * Thus we need to programatically assert the size constraint
- * All of this is const math, allowing gcc to do constant folding/propagation.
- */
-
-static inline int __get_order_pte(void)
-{
-       return get_order(PTRS_PER_PTE * sizeof(pte_t));
+       free_page((unsigned long)pgd);
 }
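
The removed __get_order_pgd()/__get_order_pte() const math existed because the address-split and the MMU page size are configurable, so a table could in principle need more than one page. Switching to plain __get_free_page()/free_page() bakes in the assumption that every table level fits in a single page. A hypothetical compile-time guard (not part of this change) that would document that assumption:

static inline void pgtable_fits_one_page(void)
{
        /* Hypothetical guard, not in the patch: the single-page allocations
         * above are only valid if each table level fits in one page.
         */
        BUILD_BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) > PAGE_SIZE);
        BUILD_BUG_ON(PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
}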
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                        __get_order_pte());
+       pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_ZERO);
 
        return pte;
 }
 
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-       pgtable_t pte_pg;
        struct page *page;
 
-       pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
-       if (!pte_pg)
-               return 0;
-       memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
-       page = virt_to_page(pte_pg);
+       page = (pgtable_t)alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
+       if (!page)
+               return NULL;
+
        if (!pgtable_pte_page_ctor(page)) {
                __free_page(page);
-               return 0;
+               return NULL;
        }
 
-       return pte_pg;
+       return page;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-       free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+       free_page((unsigned long)pte);
 }
 
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page)
 {
-       pgtable_pte_page_dtor(virt_to_page(ptep));
-       free_pages((unsigned long)ptep, __get_order_pte());
+       pgtable_pte_page_dtor(pte_page);
+       __free_page(pte_page);
 }
 
 #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
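
The pte_alloc_one()/pte_free() pair above now follows the standard pattern: allocate one zeroed, memcg-accounted page, run pgtable_pte_page_ctor() (split-PTE-lock and page-table accounting setup), and undo it with pgtable_pte_page_dtor() before freeing. For comparison, this is roughly what the generic __pte_alloc_one() helper does (paraphrased from memory; see include/asm-generic/pgalloc.h for the real code), which a later cleanup could switch to outright:

static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
        struct page *pte;

        pte = alloc_page(gfp);
        if (!pte)
                return NULL;
        if (!pgtable_pte_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }

        return pte;
}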
 
        update_mmu_cache(vma, addr, &pte);
 }
 
-void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                               pgtable_t pgtable)
-{
-       struct list_head *lh = (struct list_head *) pgtable;
-
-       assert_spin_locked(&mm->page_table_lock);
-
-       /* FIFO */
-       if (!pmd_huge_pte(mm, pmdp))
-               INIT_LIST_HEAD(lh);
-       else
-               list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
-       pmd_huge_pte(mm, pmdp) = pgtable;
-}
-
-pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
-{
-       struct list_head *lh;
-       pgtable_t pgtable;
-
-       assert_spin_locked(&mm->page_table_lock);
-
-       pgtable = pmd_huge_pte(mm, pmdp);
-       lh = (struct list_head *) pgtable;
-       if (list_empty(lh))
-               pmd_huge_pte(mm, pmdp) = NULL;
-       else {
-               pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
-               list_del(lh);
-       }
-
-       pte_val(pgtable[0]) = 0;
-       pte_val(pgtable[1]) = 0;
-
-       return pgtable;
-}
-
 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                               unsigned long end)
 {