extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
struct page *page, int writable);
#define arch_make_huge_pte arch_make_huge_pte
-static inline pte_t pte_mkhuge(pte_t pte)
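+/* Page-size bits that mark a pte as a default-sized hugepage:
+ * _PAGE_SZHUGE_4U on sun4u, _PAGE_SZHUGE_4V on sun4v.
+ */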
+static inline unsigned long __pte_default_huge_mask(void)
{
unsigned long mask;
: "=r" (mask)
: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
- return __pte(pte_val(pte) | mask);
+ return mask;
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return __pte(pte_val(pte) | __pte_default_huge_mask());
+}
+
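+/* True when the pte has all of the default hugepage size bits set.
+ * Used to decide which TSB a translation belongs to when inserting
+ * or flushing.
+ */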
+static inline bool is_default_hugetlb_pte(pte_t pte)
+{
+ unsigned long mask = __pte_default_huge_mask();
+
+ return (pte_val(pte) & mask) == mask;
}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
return __pmd(pte_val(pte));
}
#endif
+#else
+static inline bool is_default_hugetlb_pte(pte_t pte)
+{
+ return false;
+}
#endif
static inline pte_t pte_mkdirty(pte_t pte)
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
pte_t *ptep, pte_t orig, int fullmm);
+static inline void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ pte_t *ptep, pte_t orig, int fullmm)
+{
+ /* It is more efficient to let flush_tlb_kernel_range()
+ * handle init_mm tlb flushes.
+ *
+ * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+ * and SUN4V pte layout, so this inline test is fine.
+ */
+ if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+ tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+}
+
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
unsigned long addr,
pte_t orig = *ptep;
*ptep = pte;
-
- /* It is more efficient to let flush_tlb_kernel_range()
- * handle init_mm tlb flushes.
- *
- * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
- * and SUN4V pte layout, so this inline test is fine.
- */
- if (likely(mm != &init_mm) && pte_accessible(mm, orig))
- tlb_batch_add(mm, addr, ptep, orig, fullmm);
+ maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
}
#define set_pte_at(mm,addr,ptep,pte) \
#define TLB_BATCH_NR 192
struct tlb_batch {
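+	/* All vaddrs queued in one batch map the same kind of page;
+	 * see tlb_batch_add_one(), which flushes on a mismatch.
+	 */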
+ bool default_huge;
struct mm_struct *mm;
unsigned long tlb_nr;
unsigned long active;
void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
/* TLB flush operations. */
}
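+/* The hugepage_shift is computed once per range by the caller
+ * (set_huge_pte_range_at) and passed down, rather than being
+ * re-derived from the tte for every pte.
+ */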
static bool __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *pte, pte_t entry, pte_t *sentinel_pte)
+ pte_t *pte, pte_t entry, pte_t *sentinel_pte,
+ unsigned int hugepage_shift)
{
- unsigned int hugepage_shift = tte_to_shift(entry);
bool rc = true;
if (hugepage_shift != REAL_HPAGE_SHIFT) {
entry);
huge_pte_at_flush_update(mm, addr, pte, orig, sentinel_pte);
} else
- set_pte_at(mm, addr, pte, entry);
+ *pte = entry;
return rc;
}
static void __clear_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *pte, pte_t *sentinel_pte)
+ pte_t *pte, pte_t *sentinel_pte,
+ unsigned int hugepage_shift)
{
- unsigned int hugepage_shift = tte_to_shift(*pte);
-
if (hugepage_shift != REAL_HPAGE_SHIFT) {
pte_t orig = *pte;
*pte = __pte(0UL);
huge_pte_at_flush_update(mm, addr, pte, orig, sentinel_pte);
} else
- pte_clear(mm, addr, pte);
+ *pte = __pte(0UL);
}
static bool set_huge_pte_range_at(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte = pte_offset_map(pmd, addr);
pte_t *lpte = pte + PTRS_PER_PTE;
pte_t entry = *pentry;
+	pte_t orig = *pte;
bool rc = true;
+ unsigned long orig_addr = addr;
+ unsigned int hugepage_shift;
+
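+	/* When setting, size the range by the new entry; when clearing,
+	 * by the pte that is being torn down.
+	 */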
+ if (set_at)
+ hugepage_shift = tte_to_shift(entry);
+ else
+ hugepage_shift = tte_to_shift(*pte);
for (; pte < lpte; pte++, addr = addr + PAGE_SIZE) {
if (set_at) {
rc = __set_huge_pte_at(mm, addr, pte, entry,
- sentinel_pte);
+ sentinel_pte, hugepage_shift);
if (!rc)
break;
pte_val(entry) = pte_val(entry) + PAGE_SIZE;
} else
- __clear_huge_pte_at(mm, addr, pte, sentinel_pte);
+ __clear_huge_pte_at(mm, addr, pte, sentinel_pte,
+ hugepage_shift);
}
if (set_at)
*pentry = entry;
+
+ if (hugepage_shift == REAL_HPAGE_SHIFT) {
+ /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+ maybe_tlb_batch_add(mm, orig_addr, pte, orig, 0);
+ maybe_tlb_batch_add(mm, orig_addr + REAL_HPAGE_SIZE,
+ pte, orig, 0);
+ }
+
return rc;
}
tsb_insert(tsb, tag, tte);
}
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline bool is_hugetlb_pte(pte_t pte)
-{
- if ((tlb_type == hypervisor &&
- (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
- (tlb_type != hypervisor &&
- (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
- return true;
- return false;
-}
-#endif
-
#ifdef CONFIG_HUGETLB_PAGE
unsigned int xl_hugepage_shift;
static unsigned long xl_hugepage_pte;
spin_lock_irqsave(&mm->context.lock, flags);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (mm->context.huge_pte_count[MM_PTES_HUGE] && is_hugetlb_pte(pte))
+ if (mm->context.huge_pte_count[MM_PTES_HUGE] &&
+ is_default_hugetlb_pte(pte))
__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
address, pte_val(pte));
else if (mm->context.huge_pte_count[MM_PTES_XLHUGE] &&
}
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
- bool exec)
+ bool exec, bool default_huge)
{
struct tlb_batch *tb = &get_cpu_var(tlb_batch);
unsigned long nr;
}
if (!tb->active) {
- flush_tsb_user_page(mm, vaddr);
+ flush_tsb_user_page(mm, vaddr, default_huge);
global_flush_tlb_page(mm, vaddr);
goto out;
}
- if (nr == 0)
+ if (nr == 0) {
tb->mm = mm;
+ tb->default_huge = default_huge;
+ }
+
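+	/* A batch must be homogeneous: if this vaddr does not match the
+	 * batch's default_huge setting, flush what is pending and start
+	 * a new batch.
+	 */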
+ if (tb->default_huge != default_huge) {
+ flush_tlb_pending();
+ tb->default_huge = default_huge;
+ nr = 0;
+ }
tb->vaddrs[nr] = vaddr;
tb->tlb_nr = ++nr;
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
pte_t *ptep, pte_t orig, int fullmm)
{
+ bool default_huge = is_default_hugetlb_pte(orig);
+
if (tlb_type != hypervisor &&
pte_dirty(orig)) {
unsigned long paddr, pfn = pte_pfn(orig);
no_cache_flush:
if (!fullmm)
- tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+ tlb_batch_add_one(mm, vaddr, pte_exec(orig), default_huge);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (pte_val(*pte) & _PAGE_VALID) {
bool exec = pte_exec(*pte);
- tlb_batch_add_one(mm, vaddr, exec);
+ tlb_batch_add_one(mm, vaddr, exec, false);
}
pte++;
vaddr += PAGE_SIZE;
pte_t orig_pte = __pte(pmd_val(orig));
bool exec = pte_exec(orig_pte);
- tlb_batch_add_one(mm, addr, exec);
- tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+ tlb_batch_add_one(mm, addr, exec, true);
+ tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+ true);
} else {
tlb_batch_pmd_scan(mm, addr, orig);
}
spin_lock_irqsave(&mm->context.lock, flags);
- base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
- nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
- if (tlb_type == cheetah_plus || tlb_type == hypervisor)
- base = __pa(base);
- __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
-
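+	/* Only the TSBs that can hold this batch's translations need
+	 * flushing: the base (and xl hugepage) TSBs for !default_huge
+	 * batches, the huge TSB for default hugepages.
+	 */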
+ if (!tb->default_huge) {
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+ }
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ if (tb->default_huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
}
#endif
#ifdef CONFIG_HUGETLB_PAGE
- if (mm->context.tsb_block[MM_TSB_XLHUGE].tsb) {
+ if (!tb->default_huge && mm->context.tsb_block[MM_TSB_XLHUGE].tsb) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_XLHUGE].tsb;
nentries = mm->context.tsb_block[MM_TSB_XLHUGE].tsb_nentries;
base = __pa(base);
spin_unlock_irqrestore(&mm->context.lock, flags);
}
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
+ bool default_huge)
{
unsigned long nentries, base, flags;
spin_lock_irqsave(&mm->context.lock, flags);
- base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
- nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
- if (tlb_type == cheetah_plus || tlb_type == hypervisor)
- base = __pa(base);
- __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
-
+ if (!default_huge) {
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+ }
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ if (default_huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
}
#endif
#ifdef CONFIG_HUGETLB_PAGE
- if (mm->context.tsb_block[MM_TSB_XLHUGE].tsb) {
+ if (!default_huge && mm->context.tsb_block[MM_TSB_XLHUGE].tsb) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_XLHUGE].tsb;
nentries = mm->context.tsb_block[MM_TSB_XLHUGE].tsb_nentries;
base = __pa(base);