#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
/* Slightly simplified from the non-hugepage variant because by
* definition we don't have to worry about any page coloring stuff
*/
if (!pmd)
goto fail;
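+ /* For an 8MB (HPAGE_SIZE) page the huge TTE lives in the PMD
+ * itself, so hand back the PMD slot rather than allocating a
+ * PTE page.
+ */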
+ if (size == HPAGE_SIZE) {
+ rpte = (pte_t *)pmd;
+ break;
+ }
pte = pte_alloc_map(mm, NULL, pmd, addr);
}
/* This should also be moved, and made a no-op, for sun4u.
- * Only include xl hugepage sizes we plan to support.
+ * Only include hugepage sizes we plan to support.
*/
static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int hugepage_shift)
{
case XLHPAGE_2GB_SHIFT:
sun4v_hugepage_size = _PAGE_SZ2GB_4V;
break;
+ /* 8MB */
+ case HPAGE_SHIFT:
+ pte_val(entry) |= _PAGE_PMD_HUGE;
+ break;
default:
WARN_ONCE(hugepage_shift,
"hugepage_shift_to_tte: unsupported "
{
unsigned int hugepage_shift = huge_page_shift(hstate_vma(vma));
- if (hugepage_shift == HPAGE_SHIFT)
- goto out;
- entry = hugepage_shift_to_tte(entry, hugepage_shift);
-out:
- return entry;
+ return hugepage_shift_to_tte(entry, hugepage_shift);
}
static void huge_pte_at_flush_update(struct mm_struct *mm, unsigned long addr,
return rc;
}
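+/* Install (set_at == true) or tear down one huge mapping at the PMD
+ * level. A TTE sized at REAL_HPAGE_SHIFT is written straight into the
+ * PMD; other sizes are handed to __set_huge_pte_at() (or
+ * __clear_huge_pte_at()) one pmd_addr_end() step at a time.
+ */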
+static bool __set_huge_pmd_at(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, unsigned long end, pte_t *pentry,
+ pte_t *sentinel_pte, bool set_at)
+{
+ bool rc;
+ pte_t orig;
+ pte_t entry;
+ unsigned long next;
+ unsigned long hugepage_shift;
+
+ rc = true;
+ orig = *(pte_t *)pmd;
+ entry = *pentry;
+
+ if (set_at) {
+ hugepage_shift = tte_to_shift(entry);
+ if (hugepage_shift == REAL_HPAGE_SHIFT) {
+ *pmd = __pmd(pte_val(entry));
+ } else {
+ do {
+ next = pmd_addr_end(addr, end);
+ rc = __set_huge_pte_at(mm, addr, (pte_t *)pmd,
+ entry, sentinel_pte, hugepage_shift);
+ if (!rc)
+ break;
+ } while (pmd++, addr = next, addr != end);
+ }
+ *pentry = entry;
+ } else {
+ hugepage_shift = tte_to_shift(orig);
+ if (hugepage_shift == REAL_HPAGE_SHIFT) {
+ *pmd = __pmd(0);
+ } else {
+ do {
+ next = pmd_addr_end(addr, end);
+ __clear_huge_pte_at(mm, addr, (pte_t *)pmd,
+ sentinel_pte, hugepage_shift);
+ } while (pmd++, addr = next, addr != end);
+ }
+ }
+
+ if (hugepage_shift == REAL_HPAGE_SHIFT) {
+ /* An 8MB hugepage is backed by two 4MB real pages, so add a
+ * TLB batch entry for each REAL_HPAGE_SIZE half.
+ */
+ maybe_tlb_batch_add(mm, addr, (pte_t *)pmd, orig, 0);
+ maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE,
+ (pte_t *)pmd, orig, 0);
+ }
+
+ return rc;
+}
+
static bool set_huge_pmd_at(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
pte_t *pentry, pte_t *sentinel_pte, bool set_at)
pmd_t *pmd = pmd_offset(pud, addr);
unsigned long next;
bool rc;
+ bool is_huge_pmd;
+
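+ /* When setting, the incoming PTE decides whether this is a
+ * huge PMD mapping; when clearing, the existing PMD does.
+ */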
+ if (set_at)
+ is_huge_pmd = is_hugetlb_pmd(__pmd(pte_val(*pentry)));
+ else
+ is_huge_pmd = is_hugetlb_pmd(*pmd);
+
+ if (is_huge_pmd)
+ return __set_huge_pmd_at(mm, pmd, addr, end, pentry,
+ sentinel_pte, set_at);
do {
next = pmd_addr_end(addr, end);
rc = set_huge_pte_range_at(mm, pmd, addr, pentry,
- sentinel_pte, set_at);
+ sentinel_pte, set_at);
} while (pmd++, addr = next, ((addr != end) && rc));
return rc;
}
pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
- if (!pmd_none(*pmd))
+ if (xl_hugepage_shift == HPAGE_SHIFT)
+ pte = (pte_t *)pmd;
+ else if (!pmd_none(*pmd))
pte = pte_offset_map(pmd, addr);
}
}
{
return 0;
}
+
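+/* Free one PTE page and drop the mm's PTE-page count; this mirrors
+ * free_pte_range() in mm/memory.c.
+ */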
+static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long addr)
+{
+ pgtable_t token = pmd_pgtable(*pmd);
+
+ pmd_clear(pmd);
+ pte_free_tlb(tlb, token, addr);
+ atomic_long_dec(&tlb->mm->nr_ptes);
+}
+
+static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ pmd_t *pmd;
+ unsigned long next;
+ unsigned long start;
+
+ start = addr;
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(*pmd))
+ continue;
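+ /* A huge PMD maps its pages directly, so there is no PTE page
+ * beneath it to free; just clear the entry.
+ */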
+ if (is_hugetlb_pmd(*pmd))
+ pmd_clear(pmd);
+ else
+ hugetlb_free_pte_range(tlb, pmd, addr);
+ } while (pmd++, addr = next, addr != end);
+
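+ /* As in free_pmd_range(): only free the PMD page itself when the
+ * whole PUD-aligned span lies within [floor, ceiling).
+ */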
+ start &= PUD_MASK;
+ if (start < floor)
+ return;
+ if (ceiling) {
+ ceiling &= PUD_MASK;
+ if (!ceiling)
+ return;
+ }
+ if (end - 1 > ceiling - 1)
+ return;
+
+ pmd = pmd_offset(pud, start);
+ pud_clear(pud);
+ pmd_free_tlb(tlb, pmd, start);
+ mm_dec_nr_pmds(tlb->mm);
+}
+
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ pud_t *pud;
+ unsigned long next;
+ unsigned long start;
+
+ start = addr;
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
+ ceiling);
+ } while (pud++, addr = next, addr != end);
+
+ start &= PGDIR_MASK;
+ if (start < floor)
+ return;
+ if (ceiling) {
+ ceiling &= PGDIR_MASK;
+ if (!ceiling)
+ return;
+ }
+ if (end - 1 > ceiling - 1)
+ return;
+
+ pud = pud_offset(pgd, start);
+ pgd_clear(pgd);
+ pud_free_tlb(tlb, pud, start);
+}
+
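+/* Called from free_pgtables() for hugetlb VMAs to prune now-empty
+ * page-table pages, just as free_pgd_range() does for normal VMAs.
+ */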
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ pgd_t *pgd;
+ unsigned long next;
+
+ pgd = pgd_offset(tlb->mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+ } while (pgd++, addr = next, addr != end);
+}