 #define __HAVE_ARCH_PGD_FREE
 #include <asm-generic/pgalloc.h>
 
-/* Allocate the top level pgd (page directory)
- *
- * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
- * allocate the first pmd adjacent to the pgd.  This means that we can
- * subtract a constant offset to get to it.  The pmd and pgd sizes are
- * arranged so that a single pmd covers 4GB (giving a full 64-bit
- * process access to 8TB) so our lookups are effectively L2 for the
- * first 4GB of the kernel (i.e. for all ILP32 processes and all the
- * kernel for machines with under 4GB of memory) */
+/* Allocate the top level pgd (page directory) */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
-                                              PGD_ALLOC_ORDER);
-       pgd_t *actual_pgd = pgd;
+       pgd_t *pgd;
 
-       if (likely(pgd != NULL)) {
-               memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#if CONFIG_PGTABLE_LEVELS == 3
-               actual_pgd += PTRS_PER_PGD;
-               /* Populate first pmd with allocated memory.  We mark it
-                * with PxD_FLAG_ATTACHED as a signal to the system that this
-                * pmd entry may not be cleared. */
-               set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
-                                       PxD_FLAG_VALID |
-                                       PxD_FLAG_ATTACHED)
-                       + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
-               /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
-                * a signal that this pmd may not be freed */
-               set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
-#endif
-       }
-       spin_lock_init(pgd_spinlock(actual_pgd));
-       return actual_pgd;
+       pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+       if (unlikely(pgd == NULL))
+               return NULL;
+
+       memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+
+       return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-       pgd -= PTRS_PER_PGD;
-#endif
-       free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
+       free_pages((unsigned long)pgd, PGD_ORDER);
 }
 
 #if CONFIG_PGTABLE_LEVELS == 3
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+       pmd_t *pmd;
+
+       pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+       if (likely(pmd))
+               memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
+       return pmd;
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-       if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
-               /*
-                * This is the permanent pmd attached to the pgd;
-                * cannot free it.
-                * Increment the counter to compensate for the decrement
-                * done by generic mm code.
-                */
-               mm_inc_nr_pmds(mm);
-               return;
-       }
        free_pages((unsigned long)pmd, PMD_ORDER);
 }
-
 #endif
 
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-       /* preserve the gateway marker if this is the beginning of
-        * the permanent pmd */
-       if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-               set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
-                               PxD_FLAG_VALID |
-                               PxD_FLAG_ATTACHED)
-                       + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
-       else
-#endif
-               set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
-                       + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
+       set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+               + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
 }
 
 #define pmd_populate(mm, pmd, pte_page) \
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
 
 #include <asm/processor.h>
 #include <asm/cache.h>
 
-static inline spinlock_t *pgd_spinlock(pgd_t *);
-
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
  * memory.  For the return value to be meaningful, ADDR must be >=
 
 /* This is for the serialization of PxTLB broadcasts. At least on the N class
  * systems, only one PxTLB inter processor broadcast can be active at any one
- * time on the Merced bus.
-
- * PTE updates are protected by locks in the PMD.
- */
+ * time on the Merced bus. */
 extern spinlock_t pa_tlb_flush_lock;
-extern spinlock_t pa_swapper_pg_lock;
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
 extern int pa_serialize_tlb_flushes;
 #else
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
  */
-#define set_pte(pteptr, pteval)                                 \
-        do{                                                     \
-                *(pteptr) = (pteval);                           \
-        } while(0)
-
-#define set_pte_at(mm, addr, ptep, pteval)                     \
-       do {                                                    \
-               unsigned long flags;                            \
-               spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
-               set_pte(ptep, pteval);                          \
-               purge_tlb_entries(mm, addr);                    \
-               spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
+#define set_pte(pteptr, pteval)                        \
+       do {                                    \
+               *(pteptr) = (pteval);           \
+               barrier();                      \
+       } while(0)
+
+#define set_pte_at(mm, addr, pteptr, pteval)   \
+       do {                                    \
+               *(pteptr) = (pteval);           \
+               purge_tlb_entries(mm, addr);    \
        } while (0)
 
 #endif /* !__ASSEMBLY__ */
 #define KERNEL_INITIAL_SIZE    (1 << KERNEL_INITIAL_ORDER)
 
 #if CONFIG_PGTABLE_LEVELS == 3
-#define PGD_ORDER      1 /* Number of pages per pgd */
-#define PMD_ORDER      1 /* Number of pages per pmd */
-#define PGD_ALLOC_ORDER        (2 + 1) /* first pgd contains pmd */
+#define PMD_ORDER      1       /* pmd allocation order */
+#define PGD_ORDER      0       /* pgd allocation order */
 #else
-#define PGD_ORDER      1 /* Number of pages per pgd */
-#define PGD_ALLOC_ORDER        (PGD_ORDER + 1)
+#define PGD_ORDER      1       /* pgd allocation order */
 #endif
 
 /* Definitions for 3rd level (we use PLD here for Page Lower directory
  * able to effectively address 40/42/44-bits of physical address space
  * depending on 4k/16k/64k PAGE_SIZE */
 #define _PxD_PRESENT_BIT   31
-#define _PxD_ATTACHED_BIT  30
-#define _PxD_VALID_BIT     29
+#define _PxD_VALID_BIT     30
 
 #define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
-#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
 #define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
 #define pgd_flag(x)    (pgd_val(x) & PxD_FLAG_MASK)
 #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 
-#if CONFIG_PGTABLE_LEVELS == 3
-/* The first entry of the permanent pmd is not there if it contains
- * the gateway marker */
-#define pmd_none(x)    (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
-#else
 #define pmd_none(x)    (!pmd_val(x))
-#endif
 #define pmd_bad(x)     (!(pmd_flag(x) & PxD_FLAG_VALID))
 #define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pmd_clear(pmd_t *pmd) {
-#if CONFIG_PGTABLE_LEVELS == 3
-       if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-               /* This is the entry pointing to the permanent pmd
-                * attached to the pgd; cannot clear it */
-               set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED));
-       else
-#endif
                set_pmd(pmd,  __pmd(0));
 }
 
 #define pud_bad(x)      (!(pud_flag(x) & PxD_FLAG_VALID))
 #define pud_present(x)  (pud_flag(x) & PxD_FLAG_PRESENT)
 static inline void pud_clear(pud_t *pud) {
-#if CONFIG_PGTABLE_LEVELS == 3
-       if(pud_flag(*pud) & PxD_FLAG_ATTACHED)
-               /* This is the permanent pmd attached to the pud; cannot
-                * free it */
-               return;
-#endif
        set_pud(pud, __pud(0));
 }
 #endif
 #define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
 
-
-static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
-{
-       if (unlikely(pgd == swapper_pg_dir))
-               return &pa_swapper_pg_lock;
-       return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
-}
-
-
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
        pte_t pte;
-       unsigned long flags;
 
        if (!pte_young(*ptep))
                return 0;
 
-       spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
        pte = *ptep;
        if (!pte_young(pte)) {
-               spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
                return 0;
        }
-       set_pte(ptep, pte_mkold(pte));
-       purge_tlb_entries(vma->vm_mm, addr);
-       spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
+       set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
        return 1;
 }
 
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        pte_t old_pte;
-       unsigned long flags;
 
-       spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
        old_pte = *ptep;
-       set_pte(ptep, __pte(0));
-       purge_tlb_entries(mm, addr);
-       spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
+       set_pte_at(mm, addr, ptep, __pte(0));
 
        return old_pte;
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-       unsigned long flags;
-       spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
-       set_pte(ptep, pte_wrprotect(*ptep));
-       purge_tlb_entries(mm, addr);
-       spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
+       set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
 }
 
 #define pte_same(A,B)  (pte_val(A) == pte_val(B))
 
        .level 2.0
 #endif
 
-       .import         pa_tlb_lock,data
-       .macro  load_pa_tlb_lock reg
-       mfctl           %cr25,\reg
-       addil           L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
+       /* Get aligned page_table_lock address for this mm from cr28/tr4 */
+       .macro  get_ptl reg
+       mfctl   %cr28,\reg
        .endm
 
        /* space_to_prot macro creates a prot id from a space id */
 # endif
 #endif
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
+#if CONFIG_PGTABLE_LEVELS < 3
        copy            %r0,\pte
+#endif
        ldw,s           \index(\pmd),\pmd
        bb,>=,n         \pmd,_PxD_PRESENT_BIT,\fault
        dep             %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
        shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
        .endm
 
-       /* Look up PTE in a 3-Level scheme.
-        *
-        * Here we implement a Hybrid L2/L3 scheme: we allocate the
-        * first pmd adjacent to the pgd.  This means that we can
-        * subtract a constant offset to get to it.  The pmd and pgd
-        * sizes are arranged so that a single pmd covers 4GB (giving
-        * a full LP64 process access to 8TB) so our lookups are
-        * effectively L2 for the first 4GB of the kernel (i.e. for
-        * all ILP32 processes and all the kernel for machines with
-        * under 4GB of memory) */
+       /* Look up PTE in a 3-Level scheme. */
        .macro          L3_ptep pgd,pte,index,va,fault
-#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
+#if CONFIG_PGTABLE_LEVELS == 3
+       copy            %r0,\pte
        extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
-       extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        ldw,s           \index(\pgd),\pgd
-       extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
        bb,>=,n         \pgd,_PxD_PRESENT_BIT,\fault
-       extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-       shld            \pgd,PxD_VALUE_SHIFT,\index
-       extrd,u,*=      \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-       copy            \index,\pgd
-       extrd,u,*<>     \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
-       ldo             ASM_PGD_PMD_OFFSET(\pgd),\pgd
+       shld            \pgd,PxD_VALUE_SHIFT,\pgd
 #endif
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm
 
-       /* Acquire pa_tlb_lock lock and check page is present. */
-       .macro          tlb_lock        spc,ptp,pte,tmp,tmp1,fault
-#ifdef CONFIG_SMP
+       /* Acquire page_table_lock and check page is present. */
+       .macro          ptl_lock        spc,ptp,pte,tmp,tmp1,fault
+#ifdef CONFIG_TLB_PTLOCK
 98:    cmpib,COND(=),n 0,\spc,2f
-       load_pa_tlb_lock \tmp
+       get_ptl         \tmp
 1:     LDCW            0(\tmp),\tmp1
        cmpib,COND(=)   0,\tmp1,1b
        nop
 3:
        .endm
 
-       /* Release pa_tlb_lock lock without reloading lock address.
+       /* Release page_table_lock without reloading lock address.
           Note that the values in the register spc are limited to
           NR_SPACE_IDS (262144). Thus, the stw instruction always
           stores a nonzero value even when register spc is 64 bits.
           We use an ordered store to ensure all prior accesses are
           performed prior to releasing the lock. */
-       .macro          tlb_unlock0     spc,tmp
-#ifdef CONFIG_SMP
+       .macro          ptl_unlock0     spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
 98:    or,COND(=)      %r0,\spc,%r0
        stw,ma          \spc,0(\tmp)
 99:    ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
        .endm
 
-       /* Release pa_tlb_lock lock. */
-       .macro          tlb_unlock1     spc,tmp
-#ifdef CONFIG_SMP
-98:    load_pa_tlb_lock \tmp
+       /* Release page_table_lock. */
+       .macro          ptl_unlock1     spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
+98:    get_ptl         \tmp
+       ptl_unlock0     \spc,\tmp
 99:    ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-       tlb_unlock0     \spc,\tmp
 #endif
        .endm
 
 
        L3_ptep         ptp,pte,t0,va,dtlb_check_alias_20w
 
-       tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+       ptl_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot,t1
        
        idtlbt          pte,prot
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L3_ptep         ptp,pte,t0,va,nadtlb_check_alias_20w
 
-       tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+       ptl_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot,t1
 
        idtlbt          pte,prot
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_11
 
-       tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_11
+       ptl_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_11
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
        mtsp            t1, %sr1        /* Restore sr1 */
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_11
 
-       tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+       ptl_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_11
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
        mtsp            t1, %sr1        /* Restore sr1 */
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_20
 
-       tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20
+       ptl_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot,t1
 
        idtlbt          pte,prot
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_20
 
-       tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+       ptl_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot,t1
        
        idtlbt          pte,prot
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L3_ptep         ptp,pte,t0,va,itlb_fault
 
-       tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
+       ptl_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot,t1
        
        iitlbt          pte,prot
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L3_ptep         ptp,pte,t0,va,naitlb_check_alias_20w
 
-       tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+       ptl_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20w
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot,t1
 
        iitlbt          pte,prot
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
+       ptl_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
        mtsp            t1, %sr1        /* Restore sr1 */
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_11
 
-       tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_11
+       ptl_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_11
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
        mtsp            t1, %sr1        /* Restore sr1 */
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
+       ptl_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot,t1
 
        iitlbt          pte,prot
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_20
 
-       tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20
+       ptl_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20
        update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot,t1
 
        iitlbt          pte,prot
 
-       tlb_unlock1     spc,t0
+       ptl_unlock1     spc,t0
        rfir
        nop
 
 
        L3_ptep         ptp,pte,t0,va,dbit_fault
 
-       tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
+       ptl_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1
 
        make_insert_tlb spc,pte,prot,t1
                
        idtlbt          pte,prot
 
-       tlb_unlock0     spc,t0
+       ptl_unlock0     spc,t0
        rfir
        nop
 #else
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-       tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
+       ptl_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1
 
        make_insert_tlb_11      spc,pte,prot
 
        mtsp            t1, %sr1     /* Restore sr1 */
 
-       tlb_unlock0     spc,t0
+       ptl_unlock0     spc,t0
        rfir
        nop
 
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-       tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
+       ptl_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1
 
        make_insert_tlb spc,pte,prot,t1
        
        idtlbt          pte,prot
 
-       tlb_unlock0     spc,t0
+       ptl_unlock0     spc,t0
        rfir
        nop
 #endif