www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
sparc64: Trim page tables for 2G pages
author Nitin Gupta <nitin.m.gupta@oracle.com>
Thu, 2 Jun 2016 22:14:42 +0000 (15:14 -0700)
committer Allen Pais <allen.pais@oracle.com>
Thu, 15 Sep 2016 06:57:57 +0000 (12:27 +0530)
Currently, mapping a 2G page requires 256*1024 PTE entries. This
results in large amounts of RAM being used just to store page
tables. We now use 256 PMD entries to map a 2G page, which is much
more space-efficient.

Orabug: 23109070

Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
(cherry picked from commit d3c88b8f27645c14cbb220570e5945abb0989d19)
(cherry picked from commit 768096d7916fefc497f397b0675455a754ee8a5b)
Signed-off-by: Allen Pais <allen.pais@oracle.com>
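
For scale, here is a rough back-of-the-envelope calculation (not part of the
patch; it assumes the usual sparc64 8K base page, the 8M PMD-level mapping
granule and 8-byte table entries):

    #include <stdio.h>

    int main(void)
    {
            unsigned long xl_page   = 2UL << 30;    /* 2G hugepage          */
            unsigned long base_page = 8UL << 10;    /* 8K base page         */
            unsigned long pmd_page  = 8UL << 20;    /* 8M PMD-level mapping */

            /* PTE mapping: 2G / 8K = 256 * 1024 entries, ~2M of tables */
            printf("PTEs: %lu (%lu KiB)\n", xl_page / base_page,
                   xl_page / base_page * 8 / 1024);

            /* PMD mapping: 2G / 8M = 256 entries, 2K of tables         */
            printf("PMDs: %lu (%lu KiB)\n", xl_page / pmd_page,
                   xl_page / pmd_page * 8 / 1024);
            return 0;
    }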
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/tsb.h
arch/sparc/kernel/sun4v_tlb_miss.S
arch/sparc/kernel/tsb.S
arch/sparc/mm/fault_64.c
arch/sparc/mm/hugetlbpage.c

diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index e0af5d3dc36b50e402661f227eb9e87654fc0bad..bc7fc3b36a6d8b4311db3ab83cc977428b18bf17 100644
@@ -401,7 +401,7 @@ static inline unsigned long __pte_default_huge_mask(void)
 
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-       return __pte(pte_val(pte) | __pte_default_huge_mask());
+       return pte;
 }
 
 static inline bool is_default_hugetlb_pte(pte_t pte)
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 131503e36fb0864602f7028cd8c21a89197ebe50..18d8f3c20d104edbb2caf685f3bc8612f31cf4f0 100644
@@ -199,23 +199,16 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
         * if it is a HUGE PMD or a normal one.  If it is not valid
         * then jump to FAIL_LABEL.  If it is a HUGE PMD, and it
         * translates to a valid PTE, branch to PTE_LABEL.
-        *
-        * We have to propagate the 4MB bit of the virtual address
-        * because we are fabricating 8MB pages using 4MB hw pages.
         */
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
        brz,pn          REG1, FAIL_LABEL;               \
         sethi          %uhi(_PAGE_PMD_HUGE), REG2;     \
        sllx            REG2, 32, REG2;                 \
-       andcc           REG1, REG2, %g0;                \
        be,pt           %xcc, 700f;                     \
-        sethi          %hi(4 * 1024 * 1024), REG2;     \
-       brgez,pn        REG1, FAIL_LABEL;               \
-        andn           REG1, REG2, REG1;               \
-       and             VADDR, REG2, REG2;              \
+        andcc          REG1, REG2, %g0;                \
        brlz,pt         REG1, PTE_LABEL;                \
-        or             REG1, REG2, REG1;               \
+        andn           REG1, REG2, REG1;               \
 700:
 #else
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
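
The retained comment above still describes the contract of this macro; with
the 4MB-bit fix-up moved out (see the tsb.S hunk below), the check reduces to
the following. This is a rough C rendering for illustration, not code from the
patch, and the helper name and enum are hypothetical:

    /* Value as defined in pgtable_64.h, repeated here for self-containment. */
    #define _PAGE_PMD_HUGE 0x0100000000000000UL

    enum pmd_check { PMD_FAIL, PMD_WALK_PTES, PMD_IS_HUGE_PTE };

    static enum pmd_check check_pmd_huge(unsigned long pmd, unsigned long *pte_out)
    {
            if (pmd == 0)
                    return PMD_FAIL;                /* FAIL_LABEL            */
            if (!(pmd & _PAGE_PMD_HUGE))
                    return PMD_WALK_PTES;           /* 700: regular PTE walk */
            if ((long)pmd >= 0)                     /* _PAGE_VALID is bit 63 */
                    return PMD_WALK_PTES;
            *pte_out = pmd & ~_PAGE_PMD_HUGE;       /* PTE_LABEL             */
            return PMD_IS_HUGE_PTE;
    }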
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index 1d9607832a5966b20732b9a92602d01bb7def4a0..d95b421d11ec69d3d9897516a1d48dd73997e58f 100644
@@ -223,11 +223,13 @@ sun4v_tsb_miss_common:
         */
        .global sun4v_xl_hugepages
 sun4v_xl_hugepages:
-       andcc   %g5, _PAGE_E_4V, %g0
+       sethi   %uhi(_PAGE_SPECIAL_4V), %g2
+       sllx    %g2, 32, %g2
+       andcc   %g5, %g2, %g0
        be,pt   %xcc, 10f
-       sethi   %uhi(_PAGE_VALID), %g1;
+       sethi   %uhi(_PAGE_VALID | _PAGE_SPECIAL_4V), %g1;
        sllx    %g1, 32, %g1
-       or      %g1, _PAGE_SZALL_4V | _PAGE_E_4V, %g1
+       or      %g1, _PAGE_SZALL_4V, %g1
        andn    %g5, %g1, %g1
        ldxa    [%g1 + %g0] ASI_PHYS_USE_EC, %g5
        brgez,pn %g5, tsb_do_fault
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index e8474f8c2c47e5986f43fb9837eaef859daf8013..d1ebd7563fa4eb89e54a67092c44800a7abc75e4 100644
@@ -156,6 +156,15 @@ tsb_miss_page_table_walk_sun4v_fastpath:
        bne,pt          %xcc, 60f
         nop
 
+       /* It is the default huge page (8M). We have to propagate
+        * the 4MB bit of the virtual address because we are
+        * fabricating 8MB pages using 4MB hw pages.
+        */
+       sethi           %hi(4 * 1024 * 1024), %g2
+       andn            %g5, %g2, %g5
+       and             %g4, %g2, %g2
+       or              %g5, %g2, %g5
+
        /* It is a huge page, use huge page TSB entry address we
         * calculated above.  If the huge page TSB has not been
         * allocated, setup a trap stack and call hugetlb_setup()
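
The added block implements the 4MB-bit propagation that used to live in
USER_PGTABLE_CHECK_PMD_HUGE. In C terms it does roughly the following
(register roles inferred from the surrounding handler: the TTE is in %g5, the
faulting virtual address in %g4; the helper name is hypothetical):

    static unsigned long propagate_4mb_bit(unsigned long tte, unsigned long vaddr)
    {
            unsigned long four_mb = 4UL * 1024 * 1024;      /* 0x400000 */

            tte &= ~four_mb;                /* clear bit 22 in the mapping     */
            tte |= vaddr & four_mb;         /* take it from the faulting vaddr */
            return tte;
    }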
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index c911762ae6759f2d40d67b147d4685d701434c5d..f1a78f7aa650010b0cb278244fb4526b1b9cc7cf 100644
@@ -115,11 +115,15 @@ static unsigned int get_user_insn(unsigned long tpc)
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        if (is_hugetlb_pmd(*pmdp)) {
+               unsigned long hpage_mask = HPAGE_MASK;
+
+               if (xl_hugepage_shift == XLHPAGE_2GB_SHIFT)
+                       hpage_mask = ~((1UL << xl_hugepage_shift) - 1);
                if (pmd_trans_splitting(*pmdp))
                        goto out_irq_enable;
 
                pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
-               pa += tpc & ~HPAGE_MASK;
+               pa += tpc & ~hpage_mask;
 
                /* Use phys bypass so we don't pollute dtlb/dcache. */
                __asm__ __volatile__("lduwa [%1] %2, %0"
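
A minimal sketch of the new mask arithmetic in get_user_insn() (the shift
value 31 for XLHPAGE_2GB_SHIFT is an assumption for illustration): instead of
always keeping the low HPAGE_SHIFT bits of tpc, a 2G XL hugepage keeps the low
31 bits as the offset into the page.

    static unsigned long offset_in_hugepage(unsigned long tpc, unsigned int shift)
    {
            unsigned long hpage_mask = ~((1UL << shift) - 1);

            return tpc & ~hpage_mask;       /* shift == 31 -> low 31 bits kept */
    }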
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 16a3b1b3e7ba4049641eb6b1db78243ceb304c74..b4e737416750ef615cbc3fb9a45a9031078759a5 100644
@@ -173,7 +173,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
 
                if (!pmd)
                        goto fail;
-               if (size == HPAGE_SIZE) {
+               if (size != (1UL << XLHPAGE_16GB_SHIFT)) {
                        rpte = (pte_t *)pmd;
                        break;
                }
@@ -275,6 +275,7 @@ static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int hugepage_shift)
        /* 2Gb */
        case XLHPAGE_2GB_SHIFT:
                sun4v_hugepage_size = _PAGE_SZ2GB_4V;
+               pte_val(entry) |= _PAGE_PMD_HUGE;
                break;
        /* 8Mb */
        case HPAGE_SHIFT:
@@ -312,9 +313,11 @@ static void huge_pte_at_flush_update(struct mm_struct *mm, unsigned long addr,
 
 static void form_sentinel(pte_t *sentinel_pte, pte_t entry, pte_t *pte)
 {
-       pte_t sentinel = __pte(_PAGE_VALID | _PAGE_E_4V |
-               (pte_val(entry) & _PAGE_SZALL_4V) | __pa(pte));
+       pte_t sentinel = __pte(_PAGE_VALID | _PAGE_SPECIAL_4V |
+               _PAGE_PMD_HUGE | (pte_val(entry) & _PAGE_SZALL_4V) |
+               __pa(pte));
 
+       BUG_ON(__pa(pte) & _PAGE_SZALL_4V);
        *sentinel_pte = sentinel;
 }
 
@@ -327,7 +330,7 @@ static bool huge_pte_at_handle_sentinel(pte_t *sentinel_pte, pte_t *pte,
         * only update the sentinel.
         */
        if (pte_val(orig) & _PAGE_VALID) {
-               if ((pte_val(orig) & _PAGE_E_4V) == 0UL)
+               if ((pte_val(orig) & _PAGE_SPECIAL_4V) == 0UL)
                        *pte = entry;
                rc = false;
        } else if (pte_val(*sentinel_pte) & _PAGE_VALID) {
@@ -578,7 +581,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
-                       if (xl_hugepage_shift == HPAGE_SHIFT)
+                       if (xl_hugepage_shift != XLHPAGE_16GB_SHIFT)
                                pte = (pte_t *)pmd;
                        else if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);