www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
sparc64: Fix build error in flush_tsb_user_page
author: Nitin Gupta <nitin.m.gupta@oracle.com>
Fri, 3 Mar 2017 19:04:00 +0000 (11:04 -0800)
committer: Chuck Anderson <chuck.anderson@oracle.com>
Mon, 24 Apr 2017 04:43:23 +0000 (21:43 -0700)
Patch "sparc64: Add 64K page size support"
unconditionally used __flush_huge_tsb_one_entry()
which is available only when hugetlb support is
enabled.

Another issue was incorrect TSB flushing for 64K
pages in flush_tsb_user().

Orabug: 25704426

Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Allen Pais <allen.pais@oracle.com>
arch/sparc/mm/hugetlbpage.c
arch/sparc/mm/tsb.c

index 28b66af09f5dc649d1719768919744291245b49b..8c8559e64f3e072a80822d58a575460dbc823da2 100644 (file)
@@ -322,7 +322,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
        addr &= ~(size - 1);
        orig = *ptep;
-       orig_shift = pte_none(orig) ? PAGE_SIZE : huge_tte_to_shift(orig);
+       orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);
 
        for (i = 0; i < nptes; i++)
                ptep[i] = __pte(pte_val(entry) + (i << shift));
@@ -348,7 +348,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
        else
                nptes = size >> PAGE_SHIFT;
 
-       hugepage_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);
+       hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
+               huge_tte_to_shift(entry);
 
        if (pte_present(entry))
                mm->context.hugetlb_pte_count -= nptes;
index d0aa02268b629f32a7d5396632fde4dc9fe4d4f2..b5d829afbda606148145cee6888db6483a123bdb 100644 (file)
@@ -121,12 +121,18 @@ void flush_tsb_user(struct tlb_batch *tb)
 
        spin_lock_irqsave(&mm->context.lock, flags);
 
-       if (tb->hugepage_shift == PAGE_SHIFT) {
+       if (tb->hugepage_shift < HPAGE_SHIFT) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
-               __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+               if (tb->hugepage_shift == PAGE_SHIFT)
+                       __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+               else
+                       __flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
+                                            tb->hugepage_shift);
+#endif
        }
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -153,8 +159,14 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
                nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
-               __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries,
-                                          hugepage_shift);
+               if (hugepage_shift == PAGE_SHIFT)
+                       __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+                                             nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+               else
+                       __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+                                                  nentries, hugepage_shift);
+#endif
        }
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {