static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
- *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
-}
-
-#define pmd_pgtable(pmd) pmd_page(pmd)
-#else
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
- pte_t *pte)
-{
- *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
-}
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pte_page)
-{
- *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+ *pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-#endif
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
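+
+/*
+ * Example for illustration: pgtable_t is a pte_t * here (a page
+ * fragment, not a struct page), which is why pmd_populate() above
+ * can apply __pa() to it directly. For a fragment at, say, virtual
+ * address 0xc0001000, the PMD ends up holding
+ * __pa(0xc0001000) | _PMD_PRESENT.
+ */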
- extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
- extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+ extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
+ extern pgtable_t pte_alloc_one(struct mm_struct *mm);
+void pte_frag_destroy(void *pte_frag);
- pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
+pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
+void pte_fragment_free(unsigned long *table, int kernel);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
#define MAX_PGTABLE_INDEX_SIZE 0xf
extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) ({ \
- BUG_ON(!(shift)); \
- pgtable_cache[(shift) - 1]; \
- })
+#define PGT_CACHE(shift) pgtable_cache[shift]
- extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
+ extern pte_t *pte_fragment_alloc(struct mm_struct *, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
- *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+ *pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#endif
- extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
- extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+ extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
+ extern pgtable_t pte_alloc_one(struct mm_struct *mm);
+void pte_frag_destroy(void *pte_frag);
- pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
+pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
+void pte_fragment_free(unsigned long *table, int kernel);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Handling Page Tables through page fragments
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/hugetlb.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+void pte_frag_destroy(void *pte_frag)
+{
+ int count;
+ struct page *page;
+
+ page = virt_to_page(pte_frag);
+ /* drop all the pending references */
+ count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
+ /* We allow PTE_FRAG_NR fragments from a PTE page */
+ if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
+ pgtable_page_dtor(page);
+ __free_page(page);
+ }
+}
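+
+/*
+ * Worked example of the count arithmetic above, with illustrative
+ * numbers: a 64K page carved into 4K fragments gives
+ * PTE_FRAG_SIZE_SHIFT == 12 and PTE_FRAG_NR == 16. A saved cursor of
+ * page_base | 0x3000 means three fragments were handed out, so the
+ * 16 - 3 = 13 references reserved for never-issued fragments are
+ * dropped in a single atomic_sub_and_test(); the page itself is only
+ * freed once the three outstanding fragments have been freed as well.
+ */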
+
+static pte_t *get_pte_from_cache(struct mm_struct *mm)
+{
+ void *pte_frag, *ret;
+
+ if (PTE_FRAG_NR == 1)
+ return NULL;
+
+ spin_lock(&mm->page_table_lock);
+ ret = pte_frag_get(&mm->context);
+ if (ret) {
+ pte_frag = ret + PTE_FRAG_SIZE;
+ /*
+ * If we have handed out all the fragments, mark the cached PTE page NULL.
+ */
+ if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
+ pte_frag = NULL;
+ pte_frag_set(&mm->context, pte_frag);
+ }
+ spin_unlock(&mm->page_table_lock);
+ return (pte_t *)ret;
+}
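+
+/*
+ * Note on the wrap test above: the cursor advances by PTE_FRAG_SIZE
+ * per allocation, so its offset bits reach zero exactly when it
+ * steps off the end of the page. At that point the cached page is
+ * fully consumed and the cursor is reset to NULL, sending the next
+ * caller to __alloc_for_ptecache() for a fresh page.
+ */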
+
+static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
+{
+ void *ret = NULL;
+ struct page *page;
+
+ if (!kernel) {
+ page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
+ if (!page)
+ return NULL;
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ } else {
+ page = alloc_page(PGALLOC_GFP);
+ if (!page)
+ return NULL;
+ }
+
+ atomic_set(&page->pt_frag_refcount, 1);
+
+ ret = page_address(page);
+ /*
+ * If we support only one fragment, just return the
+ * allocated page.
+ */
+ if (PTE_FRAG_NR == 1)
+ return ret;
+ spin_lock(&mm->page_table_lock);
+ /*
+ * If we find the fragment cursor already set (another task got
+ * here first), return the allocated page with a single fragment
+ * count.
+ */
+ if (likely(!pte_frag_get(&mm->context))) {
+ atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
+ pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ return (pte_t *)ret;
+}
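+
+/*
+ * Two tasks can race through the unlocked path above: both allocate
+ * a page, then serialize on page_table_lock. The winner publishes
+ * its page with pt_frag_refcount raised to PTE_FRAG_NR and the
+ * cursor pointing at the second fragment; the loser finds the cursor
+ * already set and simply returns its own page as a single fragment
+ * with refcount 1.
+ */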
+
+pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
+{
+ pte_t *pte;
+
+ pte = get_pte_from_cache(mm);
+ if (pte)
+ return pte;
+
+ return __alloc_for_ptecache(mm, kernel);
+}
+
+void pte_fragment_free(unsigned long *table, int kernel)
+{
+ struct page *page = virt_to_page(table);
+
+ BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+ if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+ if (!kernel)
+ pgtable_page_dtor(page);
+ __free_page(page);
+ }
+}
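+
+/*
+ * The !kernel test above mirrors __alloc_for_ptecache(): only user
+ * page-table pages went through pgtable_page_ctor(), so only they
+ * need the matching pgtable_page_dtor() before the final put frees
+ * the page.
+ */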
extern char etext[], _stext[], _sinittext[], _einittext[];
- __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+ __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
- pte_t *pte;
-
- if (slab_is_available()) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- } else {
- pte = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
- if (pte)
- clear_page(pte);
- }
- return pte;
+ if (!slab_is_available())
+ return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
+
+ return (pte_t *)pte_fragment_alloc(mm, 1);
}
- pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+ pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- struct page *ptepage;
-
- gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT;
-
- ptepage = alloc_pages(flags, 0);
- if (!ptepage)
- return NULL;
- if (!pgtable_page_ctor(ptepage)) {
- __free_page(ptepage);
- return NULL;
- }
- return ptepage;
+ return (pgtable_t)pte_fragment_alloc(mm, 0);
}
void __iomem *
#include "autofs_i.h"
static struct dentry *autofs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+ int flags, const char *dev_name, void *data, size_t data_size)
{
- return mount_nodev(fs_type, flags, data, autofs_fill_super);
+ return mount_nodev(fs_type, flags, data, data_size, autofs_fill_super);
}
- static struct file_system_type autofs_fs_type = {
+ struct file_system_type autofs_fs_type = {
.owner = THIS_MODULE,
.name = "autofs",
.mount = autofs_mount,
return 1;
}
}
- return (*pipefd < 0);
+ return (sbi->pipefd < 0);
}
-int autofs_fill_super(struct super_block *s, void *data, int silent)
+int autofs_fill_super(struct super_block *s, void *data, size_t data_size,
+ int silent)
{
struct inode *root_inode;
struct dentry *root;
.extra1 = &one,
.extra2 = &one_thousand,
},
+#ifdef CONFIG_NUMA
+ {
+ .procname = "fragment_stall_order",
+ .data = &fragment_stall_order,
+ .maxlen = sizeof(fragment_stall_order),
+ .mode = 0644,
+ .proc_handler = fragment_stall_order_sysctl_handler,
+ .extra1 = &zero,
+ .extra2 = &max_order,
+ },
+#endif
{
.procname = "percpu_pagelist_fraction",
.data = &percpu_pagelist_fraction,