typedef struct {
        spinlock_t              lock;
        unsigned long           sparc64_ctx_val;
-       unsigned long           huge_pte_count;
+       unsigned long           hugetlb_pte_count;
+       unsigned long           thp_pte_count;
        struct tsb_config       tsb_block[MM_NUM_TSBS];
        struct hv_tsb_descr     tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
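
The split gives each consumer its own counter: hugetlb_pte_count tracks hugetlbfs mappings and thp_pte_count tracks transparent huge pages, while the huge TSB still needs the sum of both. A minimal sketch of that invariant, using a hypothetical helper that is not part of the patch:

        /* Hypothetical helper (illustration only, not in the patch): both
         * kinds of huge mapping land in the same MM_TSB_HUGE table, so
         * sizing it wants the sum of the two counters. */
        static inline unsigned long huge_tsb_entries(const mm_context_t *ctx)
        {
                return ctx->hugetlb_pte_count + ctx->thp_pte_count;
        }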
 
        up_read(&mm->mmap_sem);
 
        mm_rss = get_mm_rss(mm);
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
 #endif
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       mm_rss = mm->context.huge_pte_count;
+       mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
                if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
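
The asymmetry above is the point of the split: get_mm_rss() counts THP pages (as HPAGE_SIZE / PAGE_SIZE small-page units) but does not count hugetlbfs pages at all, so only the THP contribution may be subtracted when sizing the base TSB. A standalone toy model of that arithmetic, assuming sparc64's 8K base pages and 8M huge pages (illustration only, not kernel code):

        #include <stdio.h>

        #define PAGE_SIZE       (8UL * 1024)
        #define HPAGE_SIZE      (8UL * 1024 * 1024)

        int main(void)
        {
                unsigned long thp_pte_count = 4;     /* THP: counted in RSS */
                unsigned long hugetlb_pte_count = 2; /* hugetlb: NOT in RSS */
                unsigned long small_pages = 1000;

                /* RSS includes THP as HPAGE_SIZE/PAGE_SIZE small pages,
                 * but never includes hugetlb pages. */
                unsigned long mm_rss = small_pages +
                        thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);

                /* The base (8K) TSB only needs entries for small pages: */
                mm_rss -= thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
                printf("base TSB sized for %lu entries\n", mm_rss);

                /* The huge TSB needs entries for both kinds of mapping: */
                printf("huge TSB sized for %lu entries\n",
                       hugetlb_pte_count + thp_pte_count);
                return 0;
        }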
 
        unsigned long nptes;
 
        if (!pte_present(*ptep) && pte_present(entry))
-               mm->context.huge_pte_count++;
+               mm->context.hugetlb_pte_count++;
 
        addr &= HPAGE_MASK;
 
 
        entry = *ptep;
        if (pte_present(entry))
-               mm->context.huge_pte_count--;
+               mm->context.hugetlb_pte_count--;
 
        addr &= HPAGE_MASK;
        nptes = 1 << HUGETLB_PAGE_ORDER;
 
        spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+       if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
+           is_hugetlb_pte(pte))
                __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
                                        address, pte_val(pte));
        else
 
 
        if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
                if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-                       mm->context.huge_pte_count++;
+                       mm->context.thp_pte_count++;
                else
-                       mm->context.huge_pte_count--;
+                       mm->context.thp_pte_count--;
 
                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
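
The comment is making a lock-ordering point: with various locks held here, a GFP_KERNEL TSB allocation could end up sleeping in atomic context. The safe pattern is resize-only, deferring the initial allocation to the first huge-page TLB miss; a simplified sketch of that pattern (not the literal hunk body):

        /* Simplified sketch: only resize a huge TSB that already exists;
         * the first huge-page TLB miss performs the initial allocation
         * in a context where GFP_KERNEL is safe. */
        if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
                tsb_grow(mm, MM_TSB_HUGE, mm->context.thp_pte_count);
        /* else: leave it to the TLB-miss path */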
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       unsigned long huge_pte_count;
+       unsigned long total_huge_pte_count;
 #endif
        unsigned int i;
 
        mm->context.sparc64_ctx_val = 0UL;
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       /* We reset it to zero because the fork() page copying
+       /* We reset them to zero because the fork() page copying
         * will re-increment the counters as the parent PTEs are
         * copied into the child address space.
         */
-       huge_pte_count = mm->context.huge_pte_count;
-       mm->context.huge_pte_count = 0;
+       total_huge_pte_count = mm->context.hugetlb_pte_count +
+                              mm->context.thp_pte_count;
+       mm->context.hugetlb_pte_count = 0;
+       mm->context.thp_pte_count = 0;
 #endif
 
        /* copy_mm() copies over the parent's mm_struct before calling
        tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       if (unlikely(huge_pte_count))
-               tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+       if (unlikely(total_huge_pte_count))
+               tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
 #endif
 
        if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
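
The reset-then-recount dance above relies on the fork() copy paths going through the same accessors that do the accounting. A simplified call flow, with function names from the generic mm code (illustration only):

        /* Simplified fork() flow (illustration only):
         *
         *   dup_mm()
         *     init_new_context()        counters start at zero (reset above)
         *     dup_mmap()
         *       copy_page_range()
         *         copy_hugetlb_page_range()
         *           set_huge_pte_at()   hugetlb_pte_count++ per copied PTE
         *         copy_huge_pmd()
         *           set_pmd_at()        thp_pte_count++ per copied huge PMD
         */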