set_memory: allow set_direct_map_*_noflush() for multiple pages
author    Mike Rapoport <rppt@linux.ibm.com>
          Thu, 22 Apr 2021 06:43:26 +0000 (16:43 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Thu, 6 May 2021 01:46:46 +0000 (11:46 +1000)
The underlying implementations of set_direct_map_invalid_noflush() and
set_direct_map_default_noflush() allow updating multiple contiguous pages
at once.

Add a numpages parameter to set_direct_map_*_noflush() to expose this
ability through these APIs; an illustrative caller is sketched below.
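
For illustration, a minimal sketch of a caller using the widened API.
The helper name and the two-page range here are hypothetical and not
part of this patch; only the set_direct_map_*_noflush() signatures are
taken from the change itself:

    #include <linux/mm.h>          /* page_address() */
    #include <linux/set_memory.h>  /* set_direct_map_*_noflush() */
    #include <asm/tlbflush.h>      /* flush_tlb_kernel_range() */

    /*
     * Hedged example: drop two contiguous pages from the direct map
     * with a single call instead of one call per page.  The _noflush
     * suffix means the TLB flush remains the caller's responsibility,
     * e.g. via flush_tlb_kernel_range().
     */
    static int example_unmap_two_pages(struct page *page)
    {
            unsigned long addr = (unsigned long)page_address(page);
            int ret;

            ret = set_direct_map_invalid_noflush(page, 2);
            if (ret)
                    return ret;

            flush_tlb_kernel_range(addr, addr + 2 * PAGE_SIZE);
            return 0;
    }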

Link: https://lkml.kernel.org/r/20210303162209.8609-5-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Hagen Paul Pfeifer <hagen@jauu.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Palmer Dabbelt <palmerdabbelt@google.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rick Edgecombe <rick.p.edgecombe@intel.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tycho Andersen <tycho@tycho.ws>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
arch/arm64/include/asm/cacheflush.h
arch/arm64/mm/pageattr.c
arch/riscv/include/asm/set_memory.h
arch/riscv/mm/pageattr.c
arch/x86/include/asm/set_memory.h
arch/x86/mm/pat/set_memory.c
include/linux/set_memory.h
kernel/power/snapshot.c
mm/vmalloc.c

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 52e5c162322401d38a5942905e7ca5dae150578b..ace2c3d7ae7e3c7967507029e27fdba89aad675a 100644
@@ -133,8 +133,8 @@ static __always_inline void __flush_icache_all(void)
 
 int set_memory_valid(unsigned long addr, int numpages, int enable);
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
 bool kernel_page_present(struct page *page);
 
 #include <asm-generic/cacheflush.h>
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 92eccaf595c8e56cc78a521bd26286595017ca71..b53ef37bf95a8ad1458697b6d027d2d03fbc5e81 100644
@@ -148,34 +148,36 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
                                        __pgprot(PTE_VALID));
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
        struct page_change_data data = {
                .set_mask = __pgprot(0),
                .clear_mask = __pgprot(PTE_VALID),
        };
+       unsigned long size = PAGE_SIZE * numpages;
 
        if (!debug_pagealloc_enabled() && !rodata_full)
                return 0;
 
        return apply_to_page_range(&init_mm,
                                   (unsigned long)page_address(page),
-                                  PAGE_SIZE, change_page_range, &data);
+                                  size, change_page_range, &data);
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
 {
        struct page_change_data data = {
                .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
                .clear_mask = __pgprot(PTE_RDONLY),
        };
+       unsigned long size = PAGE_SIZE * numpages;
 
        if (!debug_pagealloc_enabled() && !rodata_full)
                return 0;
 
        return apply_to_page_range(&init_mm,
                                   (unsigned long)page_address(page),
-                                  PAGE_SIZE, change_page_range, &data);
+                                  size, change_page_range, &data);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index a9c56776fa0e74d614e0ddcc40291c7c6ad80850..b766f2ccd9de1af146559a7548891957b6533c06 100644
@@ -27,8 +27,8 @@ static inline void protect_kernel_text_data(void) {}
 static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
 #endif
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
 bool kernel_page_present(struct page *page);
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 5e49e4b4a4cccc08df7282af5aabb193fb377daf..9618181b70be442473db8c715dfee55948797c4d 100644
@@ -156,11 +156,11 @@ int set_memory_nx(unsigned long addr, int numpages)
        return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
        int ret;
        unsigned long start = (unsigned long)page_address(page);
-       unsigned long end = start + PAGE_SIZE;
+       unsigned long end = start + PAGE_SIZE * numpages;
        struct pageattr_masks masks = {
                .set_mask = __pgprot(0),
                .clear_mask = __pgprot(_PAGE_PRESENT)
@@ -173,11 +173,11 @@ int set_direct_map_invalid_noflush(struct page *page)
        return ret;
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
 {
        int ret;
        unsigned long start = (unsigned long)page_address(page);
-       unsigned long end = start + PAGE_SIZE;
+       unsigned long end = start + PAGE_SIZE * numpages;
        struct pageattr_masks masks = {
                .set_mask = PAGE_KERNEL,
                .clear_mask = __pgprot(0)
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 43fa081a1adb236daf9c2f4de4e89616285ed40f..5f84aa4b6961791d4c829197bca04069cfd6c5a4 100644
@@ -80,8 +80,8 @@ int set_pages_wb(struct page *page, int numpages);
 int set_pages_ro(struct page *page, int numpages);
 int set_pages_rw(struct page *page, int numpages);
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
 bool kernel_page_present(struct page *page);
 
 extern int kernel_set_to_readonly;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 156cd235659f3d516a4323f5a0498676b63fdbe6..15a55d6e9ceca7cf1757af082be4993a31ba9e51 100644
@@ -2192,14 +2192,14 @@ static int __set_pages_np(struct page *page, int numpages)
        return __change_page_attr_set_clr(&cpa, 0);
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
-       return __set_pages_np(page, 1);
+       return __set_pages_np(page, numpages);
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
 {
-       return __set_pages_p(page, 1);
+       return __set_pages_p(page, numpages);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index fe1aa4e54680d8263d66c8e775f4e8571ade9df4..c650f82db81302a269bc4c10804c23850d7dcdae 100644
@@ -15,11 +15,11 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
 #endif
 
 #ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
-static inline int set_direct_map_invalid_noflush(struct page *page)
+static inline int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
        return 0;
 }
-static inline int set_direct_map_default_noflush(struct page *page)
+static inline int set_direct_map_default_noflush(struct page *page, int numpages)
 {
        return 0;
 }
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1a221dcb3c01cb107769cc6c3e0475ca710711cc..27cb4e7086b7e761829dbaaba3b063fd1e866d70 100644
@@ -86,7 +86,7 @@ static inline void hibernate_restore_unprotect_page(void *page_address) {}
 static inline void hibernate_map_page(struct page *page)
 {
        if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
-               int ret = set_direct_map_default_noflush(page);
+               int ret = set_direct_map_default_noflush(page, 1);
 
                if (ret)
                        pr_warn_once("Failed to remap page\n");
@@ -99,7 +99,7 @@ static inline void hibernate_unmap_page(struct page *page)
 {
        if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
                unsigned long addr = (unsigned long)page_address(page);
-               int ret  = set_direct_map_invalid_noflush(page);
+               int ret = set_direct_map_invalid_noflush(page, 1);
 
                if (ret)
                        pr_warn_once("Failed to remap page\n");
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a13ac524f6ff8e52d8dcaafd6a683c25e7a371de..5d96fee1722682476309e989621c2381bf25ce4b 100644
@@ -2469,14 +2469,15 @@ struct vm_struct *remove_vm_area(const void *addr)
 }
 
 static inline void set_area_direct_map(const struct vm_struct *area,
-                                      int (*set_direct_map)(struct page *page))
+                                      int (*set_direct_map)(struct page *page,
+                                                            int numpages))
 {
        int i;
 
        /* HUGE_VMALLOC passes small pages to set_direct_map */
        for (i = 0; i < area->nr_pages; i++)
                if (page_address(area->pages[i]))
-                       set_direct_map(area->pages[i]);
+                       set_direct_map(area->pages[i], 1);
 }
 
 /* Handle removing and resetting vm mappings related to the vm_struct. */