 #define _CACHE_SHIFT                3
 #define _CACHE_MASK                 (7<<3)
 
-#else
+#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 #define _PAGE_PRESENT               (1<<0)  /* implemented in software */
 #define _PAGE_READ                  (1<<1)  /* implemented in software */
 #define _PAGE_MODIFIED              (1<<4)  /* implemented in software */
 #define _PAGE_FILE                  (1<<4)  /* set:pagecache unset:swap */
 
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-
 #define _PAGE_GLOBAL                (1<<8)
 #define _PAGE_VALID                 (1<<9)
 #define _PAGE_SILENT_READ           (1<<9)  /* synonym                 */
 #define _CACHE_UNCACHED             (1<<11)
 #define _CACHE_MASK                 (1<<11)
 
+#else /* 'Normal' r4K case */
+/*
+ * When using the RI/XI bit support, we have 13 bits of flags below
+ * the physical address. The RI/XI bits are placed such that a SRL 5
+ * can strip off the software bits, then a ROTR 2 can move the RI/XI
+ * into bits [63:62]. This also limits physical address to 56 bits,
+ * which is more than we need right now.
+ */
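/*
 * Editor's sketch (not part of the patch): under CONFIG_HUGETLB_PAGE
 * with RI/XI in use and PAGE_SHIFT == 12, the definitions below work
 * out to the following layout (the numbers are derived, not quoted):
 *
 *   bit  0      _PAGE_PRESENT (shared with _PAGE_READ)  \
 *   bit  1      _PAGE_WRITE                              \
 *   bit  2      _PAGE_ACCESSED                            the 5 bits
 *   bit  3      _PAGE_MODIFIED                            SRL 5 strips
 *   bit  4      _PAGE_HUGE                               /
 *   bit  5      _PAGE_NO_EXEC (XI)
 *   bit  6      _PAGE_NO_READ (RI)
 *   bit  7      _PAGE_GLOBAL
 *   bit  8      _PAGE_VALID
 *   bit  9      _PAGE_DIRTY
 *   bits 10-12  cache attributes
 *   bits 13+    PFN
 *
 * After the SRL 5, XI/RI sit in bits [1:0]; the ROTR 2 rotates them to
 * bits [63:62] and drops _PAGE_GLOBAL into bit 0, where the EntryLo G
 * bit lives, with V, D and the cache bits lining up right above it.
 */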
+
+/* implemented in software */
+#define _PAGE_PRESENT_SHIFT    (0)
+#define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
+/* implemented in software, should be unused if kernel_uses_smartmips_rixi. */
+#define _PAGE_READ_SHIFT       (kernel_uses_smartmips_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ ({if (kernel_uses_smartmips_rixi) BUG(); 1 << _PAGE_READ_SHIFT; })
+/* implemented in software */
+#define _PAGE_WRITE_SHIFT      (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE            (1 << _PAGE_WRITE_SHIFT)
+/* implemented in software */
+#define _PAGE_ACCESSED_SHIFT   (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED         (1 << _PAGE_ACCESSED_SHIFT)
+/* implemented in software */
+#define _PAGE_MODIFIED_SHIFT   (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
+/* set:pagecache unset:swap */
+#define _PAGE_FILE             (_PAGE_MODIFIED)
+
+#ifdef CONFIG_HUGETLB_PAGE
+/* huge tlb page */
+#define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_HUGE             (1 << _PAGE_HUGE_SHIFT)
 #else
+#define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT)
+#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
+#endif
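/*
 * Editor's note (not part of the patch): ({ ... }) is a GNU C statement
 * expression whose value is that of its last statement, so the dummy
 * _PAGE_HUGE above compiles wherever it is referenced yet calls BUG()
 * if a non-hugetlb kernel ever evaluates it at run time.  A minimal
 * user-space analogue of the idiom, for illustration only:
 *
 *   #include <assert.h>
 *   #define MUST_NOT_BE_USED ({ assert(0); 1; })  // 1 if it ever returned
 */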
 
-#define _PAGE_R4KBUG                (1<<5)  /* workaround for r4k bug  */
-#define _PAGE_HUGE                  (1<<5)  /* huge tlb page */
-#define _PAGE_GLOBAL                (1<<6)
-#define _PAGE_VALID                 (1<<7)
-#define _PAGE_SILENT_READ           (1<<7)  /* synonym                 */
-#define _PAGE_DIRTY                 (1<<8)  /* The MIPS dirty bit      */
-#define _PAGE_SILENT_WRITE          (1<<8)
-#define _CACHE_SHIFT               9
-#define _CACHE_MASK                 (7<<9)
+/* Page cannot be executed */
+#define _PAGE_NO_EXEC_SHIFT    (kernel_uses_smartmips_rixi ? _PAGE_HUGE_SHIFT + 1 : _PAGE_HUGE_SHIFT)
+#define _PAGE_NO_EXEC          ({if (!kernel_uses_smartmips_rixi) BUG(); 1 << _PAGE_NO_EXEC_SHIFT; })
+
+/* Page cannot be read */
+#define _PAGE_NO_READ_SHIFT    (kernel_uses_smartmips_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_NO_READ          ({if (!kernel_uses_smartmips_rixi) BUG(); 1 << _PAGE_NO_READ_SHIFT; })
+
+#define _PAGE_GLOBAL_SHIFT     (_PAGE_NO_READ_SHIFT + 1)
+#define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
+
+#define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
+/* synonym                 */
+#define _PAGE_SILENT_READ      (_PAGE_VALID)
+
+/* The MIPS dirty bit      */
+#define _PAGE_DIRTY_SHIFT      (_PAGE_VALID_SHIFT + 1)
+#define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)
+#define _PAGE_SILENT_WRITE     (_PAGE_DIRTY)
+
+#define _CACHE_SHIFT           (_PAGE_DIRTY_SHIFT + 1)
+#define _CACHE_MASK            (7 << _CACHE_SHIFT)
+
+#define _PFN_SHIFT             (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 
-#endif
 #endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
 
+#ifndef _PFN_SHIFT
+#define _PFN_SHIFT                  PAGE_SHIFT
+#endif
+#define _PFN_MASK              (~((1 << (_PFN_SHIFT)) - 1))
+
+#ifndef _PAGE_NO_READ
+#define _PAGE_NO_READ ({BUG(); 0; })
+#define _PAGE_NO_READ_SHIFT ({BUG(); 0; })
+#endif
+#ifndef _PAGE_NO_EXEC
+#define _PAGE_NO_EXEC ({BUG(); 0; })
+#endif
+#ifndef _PAGE_GLOBAL_SHIFT
+#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL)
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * pte_to_entrylo converts a page table entry (PTE) into a MIPS
+ * entrylo0/1 value.
+ */
+static inline uint64_t pte_to_entrylo(unsigned long pte_val)
+{
+       if (kernel_uses_smartmips_rixi) {
+               int sa;
+#ifdef CONFIG_32BIT
+               sa = 31 - _PAGE_NO_READ_SHIFT;
+#else
+               sa = 63 - _PAGE_NO_READ_SHIFT;
+#endif
+               /*
+                * C has no way to express that this is a DSRL
+                * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2.  Luckily
+                * in the fast path this is done in assembly.
+                */
+               return (pte_val >> _PAGE_GLOBAL_SHIFT) |
+                       ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
+       }
+
+       return pte_val >> _PAGE_GLOBAL_SHIFT;
+}
+#endif
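/*
 * Editor's sketch (not part of the patch): a stand-alone check that the
 * mask-and-shift expression in pte_to_entrylo() matches the DSRL + ROTR
 * sequence the comment describes.  The shift values are assumptions
 * taken from the 64-bit hugetlb + RI/XI layout (_PAGE_NO_EXEC_SHIFT 5,
 * _PAGE_NO_READ_SHIFT 6, _PAGE_GLOBAL_SHIFT 7), not kernel headers.
 */
#include <assert.h>
#include <stdint.h>

#define NO_EXEC_SHIFT 5
#define NO_READ_SHIFT 6
#define GLOBAL_SHIFT  7

static uint64_t rotr64(uint64_t v, unsigned int s)
{
        return (v >> s) | (v << (64 - s));
}

int main(void)
{
        uint64_t pte = 0xabcd60e5ULL;           /* arbitrary test pattern */
        /* The C formulation used by pte_to_entrylo() on CONFIG_64BIT. */
        uint64_t by_mask = (pte >> GLOBAL_SHIFT) |
                ((pte & ((1ULL << NO_EXEC_SHIFT) | (1ULL << NO_READ_SHIFT)))
                 << (63 - NO_READ_SHIFT));
        /* The assembly fast path: shift off software bits, rotate by 2. */
        uint64_t by_rotate = rotr64(pte >> NO_EXEC_SHIFT, 2);

        assert(by_mask == by_rotate);
        return 0;
}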
 
 /*
  * Cache attributes
 
 #endif
 
-#define __READABLE     (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
+#define __READABLE     (_PAGE_SILENT_READ | _PAGE_ACCESSED | (kernel_uses_smartmips_rixi ? 0 : _PAGE_READ))
 #define __WRITEABLE    (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
 
-#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK  (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
 
 #endif /* _ASM_PGTABLE_BITS_H */
 
 
 static inline void setup_protection_map(void)
 {
-       protection_map[0] = PAGE_NONE;
-       protection_map[1] = PAGE_READONLY;
-       protection_map[2] = PAGE_COPY;
-       protection_map[3] = PAGE_COPY;
-       protection_map[4] = PAGE_READONLY;
-       protection_map[5] = PAGE_READONLY;
-       protection_map[6] = PAGE_COPY;
-       protection_map[7] = PAGE_COPY;
-       protection_map[8] = PAGE_NONE;
-       protection_map[9] = PAGE_READONLY;
-       protection_map[10] = PAGE_SHARED;
-       protection_map[11] = PAGE_SHARED;
-       protection_map[12] = PAGE_READONLY;
-       protection_map[13] = PAGE_READONLY;
-       protection_map[14] = PAGE_SHARED;
-       protection_map[15] = PAGE_SHARED;
+       if (kernel_uses_smartmips_rixi) {
+               protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+               protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+               protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+               protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+               protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+               protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+               protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+               protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+
+               protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+               protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+               protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
+               protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+               protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+               protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+               protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE  | _PAGE_NO_READ);
+               protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
+
+       } else {
+               protection_map[0] = PAGE_NONE;
+               protection_map[1] = PAGE_READONLY;
+               protection_map[2] = PAGE_COPY;
+               protection_map[3] = PAGE_COPY;
+               protection_map[4] = PAGE_READONLY;
+               protection_map[5] = PAGE_READONLY;
+               protection_map[6] = PAGE_COPY;
+               protection_map[7] = PAGE_COPY;
+               protection_map[8] = PAGE_NONE;
+               protection_map[9] = PAGE_READONLY;
+               protection_map[10] = PAGE_SHARED;
+               protection_map[11] = PAGE_SHARED;
+               protection_map[12] = PAGE_READONLY;
+               protection_map[13] = PAGE_READONLY;
+               protection_map[14] = PAGE_SHARED;
+               protection_map[15] = PAGE_SHARED;
+       }
 }
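/*
 * Editor's sketch (not part of the patch): the sixteen RI/XI entries
 * above follow a single rule.  The index decodes as VM_READ (bit 0),
 * VM_WRITE (bit 1), VM_EXEC (bit 2) and VM_SHARED (bit 3); every entry
 * gets _PAGE_PRESENT, a NO_READ/NO_EXEC bit for each missing
 * permission, and _PAGE_WRITE only when the mapping is shared and
 * writable (private writes stay read-only for copy-on-write).  The bit
 * values below are placeholders and _page_cachable_default is omitted;
 * this only demonstrates the pattern.
 */
#include <stdio.h>

#define _PAGE_PRESENT (1 << 0)
#define _PAGE_WRITE   (1 << 1)
#define _PAGE_NO_EXEC (1 << 5)
#define _PAGE_NO_READ (1 << 6)

int main(void)
{
        for (int i = 0; i < 16; i++) {
                int read = i & 1, write = i & 2, exec = i & 4, shared = i & 8;
                unsigned int prot = _PAGE_PRESENT;

                if (!read)
                        prot |= _PAGE_NO_READ;
                if (!exec)
                        prot |= _PAGE_NO_EXEC;
                if (write && shared)    /* private writes fault for COW */
                        prot |= _PAGE_WRITE;
                printf("protection_map[%2d] = %#04x\n", i, prot);
        }
        return 0;
}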
 
 void __cpuinit cpu_cache_init(void)
 
        label_vmalloc_done,
        label_tlbw_hazard,
        label_split,
+       label_tlbl_goaround1,
+       label_tlbl_goaround2,
        label_nopage_tlbl,
        label_nopage_tlbs,
        label_nopage_tlbm,
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
 UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
 UASM_L_LA(_nopage_tlbl)
 UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
        }
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-                                                struct uasm_label **l,
-                                                struct uasm_reloc **r,
-                                                unsigned int tmp,
-                                                enum tlb_write_entry wmode)
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+                                                                 unsigned int reg)
 {
-       /* Set huge page tlb entry size */
-       uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
-       uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
-       uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+       if (kernel_uses_smartmips_rixi) {
+               UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+               UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+       } else {
+#ifdef CONFIG_64BIT_PHYS_ADDR
+               uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+               UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+       }
+}
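/*
 * Editor's illustration (not part of the patch): with the assumed
 * hugetlb + RI/XI layout (shifts 5 and 7, see above), the RI/XI branch
 * emits on a 64-bit kernel:
 *
 *     dsrl  reg, reg, 5     # ilog2(_PAGE_NO_EXEC)
 *     drotr reg, reg, 2     # ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)
 *
 * (srl/rotr on 32-bit), stripping the software bits and rotating RI/XI
 * into the top of EntryLo, while the legacy branch is a single shift by
 * ilog2(_PAGE_GLOBAL).
 */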
 
-       build_tlb_write_entry(p, l, r, wmode);
+#ifdef CONFIG_HUGETLB_PAGE
 
+static __cpuinit void build_restore_pagemask(u32 **p,
+                                            struct uasm_reloc **r,
+                                            unsigned int tmp,
+                                            enum label_id lid)
+{
        /* Reset default page size */
        if (PM_DEFAULT_MASK >> 16) {
                uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
                uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-               uasm_il_b(p, r, label_leave);
+               uasm_il_b(p, r, lid);
                uasm_i_mtc0(p, tmp, C0_PAGEMASK);
        } else if (PM_DEFAULT_MASK) {
                uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-               uasm_il_b(p, r, label_leave);
+               uasm_il_b(p, r, lid);
                uasm_i_mtc0(p, tmp, C0_PAGEMASK);
        } else {
-               uasm_il_b(p, r, label_leave);
+               uasm_il_b(p, r, lid);
                uasm_i_mtc0(p, 0, C0_PAGEMASK);
        }
 }
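/*
 * Editor's note (not part of the patch): the mtc0 is deliberately
 * emitted after the branch; it lands in the branch delay slot and so
 * still executes on the way to 'lid'.  When PM_DEFAULT_MASK needs the
 * lui, the generated sequence is roughly:
 *
 *     lui   tmp, PM_DEFAULT_MASK >> 16
 *     ori   tmp, tmp, PM_DEFAULT_MASK & 0xffff
 *     b     <lid>
 *      mtc0 tmp, c0_pagemask      # delay slot
 */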
 
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+                                                struct uasm_label **l,
+                                                struct uasm_reloc **r,
+                                                unsigned int tmp,
+                                                enum tlb_write_entry wmode)
+{
+       /* Set huge page tlb entry size */
+       uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+       uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+       uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+       build_tlb_write_entry(p, l, r, wmode);
+
+       build_restore_pagemask(p, r, tmp, label_leave);
+}
+
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
        if (!small_sequence)
                uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
-       UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
+       build_convert_pte_to_entrylo(p, pte);
        UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
        /* convert to entrylo1 */
        if (small_sequence)
        if (cpu_has_64bits) {
                uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
                uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-               uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-               UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-               uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
+               if (kernel_uses_smartmips_rixi) {
+                       UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+                       UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+                       UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+                       UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+                       UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               } else {
+                       uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+                       UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+                       uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+               }
                UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
        } else {
                int pte_off_even = sizeof(pte_t) / 2;
        UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
        if (r45k_bvahwbug())
                build_tlb_probe_entry(p);
-       UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
-       if (r4k_250MHZhwbug())
-               UASM_i_MTC0(p, 0, C0_ENTRYLO0);
-       UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-       UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
-       if (r45k_bvahwbug())
-               uasm_i_mfc0(p, tmp, C0_INDEX);
+       if (kernel_uses_smartmips_rixi) {
+               UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+               UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+               UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               if (r4k_250MHZhwbug())
+                       UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+               UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+               UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+       } else {
+               UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+               if (r4k_250MHZhwbug())
+                       UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+               UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+               UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+               if (r45k_bvahwbug())
+                       uasm_i_mfc0(p, tmp, C0_INDEX);
+       }
        if (r4k_250MHZhwbug())
                UASM_i_MTC0(p, 0, C0_ENTRYLO1);
        UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 build_pte_present(u32 **p, struct uasm_reloc **r,
                  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-       uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-       uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-       uasm_il_bnez(p, r, pte, lid);
+       if (kernel_uses_smartmips_rixi) {
+               uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+               uasm_il_beqz(p, r, pte, lid);
+       } else {
+               uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+               uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+               uasm_il_bnez(p, r, pte, lid);
+       }
        iPTE_LW(p, pte, ptr);
 }
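/*
 * Editor's note (not part of the patch): the legacy sequence checks two
 * bits at once: andi isolates _PAGE_PRESENT | _PAGE_READ, xori flips
 * exactly those bits, so the result is zero only if both were set and
 * bnez takes the fault label otherwise.  With RI/XI, read permission is
 * enforced in hardware by the RI bit, so software only needs to test
 * _PAGE_PRESENT and a single andi/beqz pair suffices.
 */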
 
        build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
+
+       if (kernel_uses_smartmips_rixi) {
+               /*
+                * If the page is not _PAGE_VALID, RI or XI could not
+                * have triggered it.  Skip the expensive test.
+                */
+               uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+               uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+               uasm_i_nop(&p);
+
+               uasm_i_tlbr(&p);
+               /* Examine entrylo 0 or 1 based on ptr. */
+               uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+               uasm_i_beqz(&p, K0, 8);
+
+               UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
+               UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+               /*
+                * If the entryLo (now in K0) is valid (bit 1), RI or
+                * XI must have triggered it.
+                */
+               uasm_i_andi(&p, K0, K0, 2);
+               uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+
+               uasm_l_tlbl_goaround1(&l, p);
+               /* Reload the PTE value */
+               iPTE_LW(&p, K0, K1);
+       }
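/*
 * Editor's sketch (not part of the patch): in rough C-like pseudocode,
 * the uasm sequence above compiles to a fast path of the form:
 *
 *     if (!(pte & _PAGE_VALID))
 *             goto tlbl_goaround1;      // ordinary miss, not RI/XI
 *     tlbr();                           // refetch the matching entry
 *     lo = (ptr & sizeof(pte_t)) ? c0_entrylo1 : c0_entrylo0;
 *     if (lo & 2)                       // hardware V bit already set,
 *             goto nopage_tlbl;         //  so RI/XI caused this fault
 * tlbl_goaround1:
 *     pte = *ptep;                      // reload the PTE value
 *
 * The beqz offset of 8 skips the ENTRYLO1 read for even PTEs, with the
 * ENTRYLO0 read sitting in its delay slot.
 */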
        build_make_valid(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
        iPTE_LW(&p, K0, K1);
        build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
        build_tlb_probe_entry(&p);
+
+       if (kernel_uses_smartmips_rixi) {
+               /*
+                * If the page is not _PAGE_VALID, RI or XI could not
+                * have triggered it.  Skip the expensive test.
+                */
+               uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+               uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+               uasm_i_nop(&p);
+
+               uasm_i_tlbr(&p);
+               /* Examine entrylo 0 or 1 based on ptr. */
+               uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+               uasm_i_beqz(&p, K0, 8);
+
+               UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
+               UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+               /*
+                * If the entryLo (now in K0) is valid (bit 1), RI or
+                * XI must have triggered it.
+                */
+               uasm_i_andi(&p, K0, K0, 2);
+               uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+               /* Reload the PTE value */
+               iPTE_LW(&p, K0, K1);
+
+               /*
+                * We clobbered C0_PAGEMASK, restore it.  On the other branch
+                * it is restored in build_huge_tlb_write_entry.
+                */
+               build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+
+               uasm_l_tlbl_goaround2(&l, p);
+       }
        uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
        build_huge_handler_tail(&p, &r, &l, K0, K1);
 #endif