* Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
  * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  *
  * ... and the days got worse and worse and now you see
  * I've gone completly out of my mind.
        label_nopage_tlbm,
        label_smp_pgtable_change,
        label_r3000_write_probe_fail,
+#ifdef CONFIG_HUGETLB_PAGE
+       label_tlb_huge_update,
+#endif
 };
 
 UASM_L_LA(_second_part)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+#ifdef CONFIG_HUGETLB_PAGE
+UASM_L_LA(_tlb_huge_update)
+#endif
 
 /*
  * For debug purposes.
 #define C0_TCBIND      2, 2
 #define C0_ENTRYLO1    3, 0
 #define C0_CONTEXT     4, 0
+#define C0_PAGEMASK    5, 0
 #define C0_BADVADDR    8, 0
 #define C0_ENTRYHI     10, 0
 #define C0_EPC         14, 0
        }
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * Emit code that writes a huge page TLB entry: temporarily load the
+ * huge page size into c0_pagemask, perform the TLB write, then restore
+ * the default page mask on the way out of the handler.
+ */
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+                                                struct uasm_label **l,
+                                                struct uasm_reloc **r,
+                                                unsigned int tmp,
+                                                enum tlb_write_entry wmode)
+{
+       /* Set huge page tlb entry size */
+       uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+       uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+       uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+       build_tlb_write_entry(p, l, r, wmode);
+
+       /*
+        * Reset default page size.  In each arm the mtc0 follows the
+        * branch so it executes in the branch delay slot, i.e. the
+        * page mask is restored on the way to label_leave.
+        */
+       if (PM_DEFAULT_MASK >> 16) {
+               /* Mask needs both halves: lui upper, ori lower. */
+               uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+               uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+               uasm_il_b(p, r, label_leave);
+               uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+       } else if (PM_DEFAULT_MASK) {
+               /* Mask fits in 16 bits: single ori from $zero. */
+               uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+               uasm_il_b(p, r, label_leave);
+               uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+       } else {
+               /* Mask is zero: write $zero directly, no scratch needed. */
+               uasm_il_b(p, r, label_leave);
+               uasm_i_mtc0(p, 0, C0_PAGEMASK);
+       }
+}
+
+/*
+ * Check if Huge PTE is present, if so then jump to LABEL.
+ *
+ * Loads the pmd entry through PMD into TMP (clobbering TMP), tests
+ * the _PAGE_HUGE bit, and emits a branch to label LID that is taken
+ * when the entry describes a huge page.
+ */
+static void __cpuinit
+build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
+               unsigned int pmd, int lid)
+{
+       UASM_i_LW(p, tmp, 0, pmd);
+       uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
+       uasm_il_bnez(p, r, tmp, lid);
+}
+
+/*
+ * Emit code that converts the huge PTE in PTE to entrylo format and
+ * loads c0_entrylo0/c0_entrylo1 with the two halves of the mapping.
+ */
+static __cpuinit void build_huge_update_entries(u32 **p,
+                                               unsigned int pte,
+                                               unsigned int tmp)
+{
+       int small_sequence;
+
+       /*
+        * A huge PTE describes an area the size of the
+        * configured huge page size. This is twice the
+        * size of the large TLB entry we intend to use.
+        * A TLB entry half the size of the configured
+        * huge page size is configured into entrylo0
+        * and entrylo1 to cover the contiguous huge PTE
+        * address space.
+        */
+       small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
+
+       /*
+        * (HPAGE_SIZE >> 7) is the entrylo-format offset of the second
+        * half: half the huge page (HPAGE_SIZE >> 1) shifted right by 6
+        * to match the SRL below.  If it fits in 16 bits a single addiu
+        * suffices; otherwise preload it into tmp with lui (the low 16
+        * bits are zero here — presumably HPAGE_SIZE is a power of two).
+        */
+       /* We can clobber tmp.  It isn't used after this.*/
+       if (!small_sequence)
+               uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
+
+       UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
+       uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+       /* convert to entrylo1 */
+       if (small_sequence)
+               UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
+       else
+               UASM_i_ADDU(p, pte, pte, tmp);
+
+       uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+}
+
+/*
+ * Emit the tail of a tlbchange handler for the huge page path: store
+ * the updated PTE back, load entrylo0/entrylo1 from it, and perform an
+ * indexed TLB write with the huge page mask.  PTE and PTR are both
+ * clobbered (PTR is reused as scratch by build_huge_update_entries).
+ */
+static __cpuinit void build_huge_handler_tail(u32 **p,
+                                             struct uasm_reloc **r,
+                                             struct uasm_label **l,
+                                             unsigned int pte,
+                                             unsigned int ptr)
+{
+#ifdef CONFIG_SMP
+       /*
+        * Store conditional; if it fails, branch back to
+        * label_tlb_huge_update to retry the whole update.
+        */
+       UASM_i_SC(p, pte, 0, ptr);
+       uasm_il_beqz(p, r, pte, label_tlb_huge_update);
+       UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
+#else
+       UASM_i_SW(p, pte, 0, ptr);
+#endif
+       build_huge_update_entries(p, pte, ptr);
+       build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 #ifdef CONFIG_64BIT
 /*
  * TMP and PTR are scratch.
        build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE
+       build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+#endif
+
        build_get_ptep(&p, K0, K1);
        build_update_entries(&p, K0, K1);
        build_tlb_write_entry(&p, &l, &r, tlb_random);
        uasm_l_leave(&l, p);
        uasm_i_eret(&p); /* return from trap */
 
+#ifdef CONFIG_HUGETLB_PAGE
+       uasm_l_tlb_huge_update(&l, p);
+       UASM_i_LW(&p, K0, 0, K1);
+       build_huge_update_entries(&p, K0, K1);
+       build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
+#endif
+
 #ifdef CONFIG_64BIT
        build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
 #endif
                uasm_copy_handler(relocs, labels, tlb_handler, p, f);
                final_len = p - tlb_handler;
        } else {
-#ifdef MODULE_START
+#if defined(CONFIG_HUGETLB_PAGE)
+               const enum label_id ls = label_tlb_huge_update;
+#elif defined(MODULE_START)
                const enum label_id ls = label_module_alloc;
 #else
                const enum label_id ls = label_vmalloc;
        build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE
+       /*
+        * For huge tlb entries, pmd doesn't contain an address but
+        * instead contains the tlb pte. Check the PAGE_HUGE bit and
+        * see if we need to jump to huge tlb processing.
+        */
+       build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
+#endif
+
        UASM_i_MFC0(p, pte, C0_BADVADDR);
        UASM_i_LW(p, ptr, 0, ptr);
        UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
        build_make_valid(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
+#ifdef CONFIG_HUGETLB_PAGE
+       /*
+        * This is the entry point when build_r4000_tlbchange_handler_head
+        * spots a huge page.
+        */
+       uasm_l_tlb_huge_update(&l, p);
+       iPTE_LW(&p, K0, K1);
+       build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
+       build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
        uasm_l_nopage_tlbl(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);
        build_make_write(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
+#ifdef CONFIG_HUGETLB_PAGE
+       /*
+        * This is the entry point when
+        * build_r4000_tlbchange_handler_head spots a huge page.
+        */
+       uasm_l_tlb_huge_update(&l, p);
+       iPTE_LW(&p, K0, K1);
+       build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, K0, K0,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+       build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
        uasm_l_nopage_tlbs(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);
        build_make_write(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
+#ifdef CONFIG_HUGETLB_PAGE
+       /*
+        * This is the entry point when
+        * build_r4000_tlbchange_handler_head spots a huge page.
+        */
+       uasm_l_tlb_huge_update(&l, p);
+       iPTE_LW(&p, K0, K1);
+       build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, K0, K0,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+       build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
        uasm_l_nopage_tlbm(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);