static inline pte_t pte_mkhuge(pte_t pte)
{
- return __pte(pte_val(pte) | __pte_default_huge_mask());
+ return pte;
}
static inline bool is_default_hugetlb_pte(pte_t pte)
* if it is a HUGE PMD or a normal one. If it is not valid
* then jump to FAIL_LABEL. If it is a HUGE PMD, and it
* translates to a valid PTE, branch to PTE_LABEL.
- *
- * We have to propagate the 4MB bit of the virtual address
- * because we are fabricating 8MB pages using 4MB hw pages.
*/
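To make the rewritten walker easier to follow, here is a rough C rendering of what the macro below now does. This is a sketch only: the real check runs in the TLB-miss path where no C is possible, and check_pmd, PMD_HUGE_BIT and VALID_BIT are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>

#define PMD_HUGE_BIT	(1ULL << 58)	/* stand-in for _PAGE_PMD_HUGE */
#define VALID_BIT	(1ULL << 63)	/* stand-in for _PAGE_VALID    */

enum pmd_walk { PMD_FAIL, PMD_IS_PTE, PMD_IS_TABLE };

/* A zero PMD fails the walk.  A valid PMD with the huge bit set is
 * itself the final PTE (huge bit stripped on the way out).  Anything
 * else is treated as a pointer to the next table level at 700:.
 */
static enum pmd_walk check_pmd(uint64_t pmd, uint64_t *pte_out)
{
	if (pmd == 0)
		return PMD_FAIL;		/* brz,pn REG1, FAIL_LABEL */
	if ((pmd & PMD_HUGE_BIT) && (pmd & VALID_BIT)) {
		*pte_out = pmd & ~PMD_HUGE_BIT;	/* andn in the delay slot  */
		return PMD_IS_PTE;		/* brlz -> PTE_LABEL       */
	}
	return PMD_IS_TABLE;			/* be -> 700f              */
}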
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
brz,pn REG1, FAIL_LABEL; \
sethi %uhi(_PAGE_PMD_HUGE), REG2; \
sllx REG2, 32, REG2; \
andcc REG1, REG2, %g0; \
be,pt %xcc, 700f; \
- sethi %hi(4 * 1024 * 1024), REG2; \
- brgez,pn REG1, FAIL_LABEL; \
- andn REG1, REG2, REG1; \
- and VADDR, REG2, REG2; \
+ nop; \
brlz,pt REG1, PTE_LABEL; \
- or REG1, REG2, REG1; \
+ andn REG1, REG2, REG1; \
700:
#else
#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
*/
.global sun4v_xl_hugepages
sun4v_xl_hugepages:
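+ /* Sentinel ptes are tagged with _PAGE_SPECIAL_4V by
+  * form_sentinel(); a real xl-huge pte never carries it.
+  */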
- andcc %g5, _PAGE_E_4V, %g0
+ sethi %uhi(_PAGE_SPECIAL_4V), %g2
+ sllx %g2, 32, %g2
+ andcc %g5, %g2, %g0
be,pt %xcc, 10f
- sethi %uhi(_PAGE_VALID), %g1;
+ sethi %uhi(_PAGE_VALID | _PAGE_SPECIAL_4V | _PAGE_PMD_HUGE), %g1
sllx %g1, 32, %g1
- or %g1, _PAGE_SZALL_4V | _PAGE_E_4V, %g1
+ or %g1, _PAGE_SZALL_4V, %g1
andn %g5, %g1, %g1
ldxa [%g1 + %g0] ASI_PHYS_USE_EC, %g5
brgez,pn %g5, tsb_do_fault
bne,pt %xcc, 60f
nop
+ /* It is the default huge page (8M). We have to propagate
+ * the 4MB bit of the virtual address because we are
+ * fabricating 8MB pages using 4MB hw pages.
+ */
+ sethi %hi(4 * 1024 * 1024), %g2
+ andn %g5, %g2, %g5
+ and %g4, %g2, %g2
+ or %g5, %g2, %g5
+
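In C terms, the fabrication above amounts to the following sketch (fabricate_8m_tte and BIT_4MB are hypothetical names; the real work is the four instructions just shown, with %g4 holding the faulting virtual address):

#include <stdint.h>

#define BIT_4MB	0x400000UL	/* sethi %hi(4 * 1024 * 1024) */

/* An 8M software page is backed by two adjacent 4M hw pages, so
 * bit 22 of the faulting VA selects which 4M half this TTE maps.
 */
static uint64_t fabricate_8m_tte(uint64_t tte, uint64_t vaddr)
{
	tte &= ~BIT_4MB;		/* andn %g5, %g2, %g5 */
	tte |= vaddr & BIT_4MB;		/* and + or via %g2   */
	return tte;
}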
/* It is a huge page, use huge page TSB entry address we
* calculated above. If the huge page TSB has not been
* allocated, setup a trap stack and call hugetlb_setup()
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
if (is_hugetlb_pmd(*pmdp)) {
+ unsigned long hpage_mask = HPAGE_MASK;
+
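+ /* 2Gb xl pages span more than HPAGE_MASK covers, so
+  * widen the mask before taking the in-page offset.
+  */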
+ if (xl_hugepage_shift == XLHPAGE_2GB_SHIFT)
+ hpage_mask = ~((1UL << xl_hugepage_shift) - 1);
if (pmd_trans_splitting(*pmdp))
goto out_irq_enable;
pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
- pa += tpc & ~HPAGE_MASK;
+ pa += tpc & ~hpage_mask;
/* Use phys bypass so we don't pollute dtlb/dcache. */
__asm__ __volatile__("lduwa [%1] %2, %0"
if (!pmd)
goto fail;
- if (size == HPAGE_SIZE) {
+ if (size != (1UL << XLHPAGE_16GB_SHIFT)) {
rpte = (pte_t *)pmd;
break;
}
/* 2Gb */
case XLHPAGE_2GB_SHIFT:
sun4v_hugepage_size = _PAGE_SZ2GB_4V;
+ pte_val(entry) |= _PAGE_PMD_HUGE;
break;
/* 8Mb */
case HPAGE_SHIFT:
static void form_sentinel(pte_t *sentinel_pte, pte_t entry, pte_t *pte)
{
- pte_t sentinel = __pte(_PAGE_VALID | _PAGE_E_4V |
- (pte_val(entry) & _PAGE_SZALL_4V) | __pa(pte));
+ pte_t sentinel = __pte(_PAGE_VALID | _PAGE_SPECIAL_4V |
+ _PAGE_PMD_HUGE | (pte_val(entry) & _PAGE_SZALL_4V) |
+ __pa(pte));
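+ /* __pa(pte) must not overlap the size-field bits that were
+  * just ORed into the sentinel.
+  */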
+ BUG_ON(__pa(pte) & _PAGE_SZALL_4V);
*sentinel_pte = sentinel;
}
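The consumer of this encoding is the sun4v_xl_hugepages stub earlier in the patch, which strips the flag and size bits and loads the real pte through ASI_PHYS_USE_EC. In C the decode would look roughly like this; sentinel_to_pte is a hypothetical helper, not part of the patch, and the _PAGE_* macros, pte_t and __va() are assumed from the surrounding kernel headers:

/* Recover the physical address of the real pte from a sentinel,
 * then convert it back to a kernel virtual address.  The assembly
 * avoids __va() by loading through the physical-address ASI.
 */
static pte_t *sentinel_to_pte(pte_t sentinel)
{
	unsigned long mask = _PAGE_VALID | _PAGE_SPECIAL_4V |
			     _PAGE_PMD_HUGE | _PAGE_SZALL_4V;

	return (pte_t *)__va(pte_val(sentinel) & ~mask);
}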
* only update the sentinel.
*/
if (pte_val(orig) & _PAGE_VALID) {
- if ((pte_val(orig) & _PAGE_E_4V) == 0UL)
+ if ((pte_val(orig) & _PAGE_SPECIAL_4V) == 0UL)
*pte = entry;
rc = false;
} else if (pte_val(*sentinel_pte) & _PAGE_VALID) {
pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
- if (xl_hugepage_shift == HPAGE_SHIFT)
+ if (xl_hugepage_shift != XLHPAGE_16GB_SHIFT)
pte = (pte_t *)pmd;
else if (!pmd_none(*pmd))
pte = pte_offset_map(pmd, addr);