www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: Introduce vmap_page_range() to map pages in PCI address space
author Alexei Starovoitov <ast@kernel.org>
Fri, 8 Mar 2024 17:12:54 +0000 (09:12 -0800)
committer Daniel Borkmann <daniel@iogearbox.net>
Mon, 11 Mar 2024 15:58:10 +0000 (16:58 +0100)
ioremap_page_range() should be used for ranges within vmalloc range only.
The vmalloc ranges are allocated by get_vm_area(). PCI has "resource"
allocator that manages PCI_IOBASE, IO_SPACE_LIMIT address range, hence
introduce vmap_page_range() to be used exclusively to map pages
in PCI address space.

Fixes: 3e49a866c9dc ("mm: Enforce VM_IOREMAP flag and range in ioremap_page_range.")
Reported-by: Miguel Ojeda <ojeda@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Miguel Ojeda <ojeda@kernel.org>
Link: https://lore.kernel.org/bpf/CANiq72ka4rir+RTN2FQoT=Vvprp_Ao-CvoYEkSNqtSY+RZj+AA@mail.gmail.com
arch/arm/mm/ioremap.c
arch/loongarch/kernel/setup.c
arch/mips/loongson64/init.c
arch/powerpc/kernel/isa-bridge.c
drivers/pci/pci.c
include/linux/io.h
mm/vmalloc.c

index 2129070065c32328baed62a73449f10bc9caf4e1..794cfea9f9d4c894d906d3032cac50cbbf0161ef 100644 (file)
@@ -110,8 +110,8 @@ void __init add_static_vm_early(struct static_vm *svm)
 int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
 {
-       return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
-                                 __pgprot(mtype->prot_pte));
+       return vmap_page_range(virt, virt + PAGE_SIZE, phys,
+                              __pgprot(mtype->prot_pte));
 }
 EXPORT_SYMBOL(ioremap_page);
 
@@ -466,8 +466,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
        if (res->end > IO_SPACE_LIMIT)
                return -EINVAL;
 
-       return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
-                                 __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
+       return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+                              __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
 }
 EXPORT_SYMBOL(pci_remap_iospace);
 
index 634ef17fd38bf10d8bd9deef8a6693f0f4777c1e..fd915ad69c09b8f0d0da3f2934b4018078015706 100644 (file)
@@ -490,7 +490,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
        }
 
        vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
-       ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+       vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
 
        return 0;
 }
index 553142c1f14fe2261d963b3784f3ed9e6c086cd2..a35dd731179582f981de5517ee827295d6796173 100644 (file)
@@ -180,7 +180,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
 
        vaddr = PCI_IOBASE + range->io_start;
 
-       ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+       vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
 
        return 0;
 }
index 48e0eaf1ad61559a374fe83b33c65aee6ef35e7f..5c064485197a9059835f23530c0841f78803fbe8 100644 (file)
@@ -46,8 +46,8 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size)
        WARN_ON_ONCE(size & ~PAGE_MASK);
 
        if (slab_is_available()) {
-               if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
-                               pgprot_noncached(PAGE_KERNEL)))
+               if (vmap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
+                                   pgprot_noncached(PAGE_KERNEL)))
                        vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
        } else {
                early_ioremap_range(ISA_IO_BASE, pa, size,
index c3585229c12a2145401d675ff84c20288b8f158e..ccee56615f784ce1f43f848eede4dfb697461472 100644 (file)
@@ -4353,8 +4353,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
        if (res->end > IO_SPACE_LIMIT)
                return -EINVAL;
 
-       return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
-                                 pgprot_device(PAGE_KERNEL));
+       return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+                              pgprot_device(PAGE_KERNEL));
 #else
        /*
         * This architecture does not have memory mapped I/O space,
index 7304f2a69960a3493d7b218e43ee9a2155df78f2..235ba7d80a8f0d76e9d33b772226ebab40bfe8fd 100644 (file)
@@ -23,12 +23,19 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
 #ifdef CONFIG_MMU
 int ioremap_page_range(unsigned long addr, unsigned long end,
                       phys_addr_t phys_addr, pgprot_t prot);
+int vmap_page_range(unsigned long addr, unsigned long end,
+                   phys_addr_t phys_addr, pgprot_t prot);
 #else
 static inline int ioremap_page_range(unsigned long addr, unsigned long end,
                                     phys_addr_t phys_addr, pgprot_t prot)
 {
        return 0;
 }
+static inline int vmap_page_range(unsigned long addr, unsigned long end,
+                                 phys_addr_t phys_addr, pgprot_t prot)
+{
+       return 0;
+}
 #endif
 
 /*
index e5b8c70950bc7aa494541ad3b5e60b031fa05d05..1e36322d83d895ca8964240d8c5fa9dfb14868a9 100644 (file)
@@ -304,11 +304,24 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
        return err;
 }
 
+int vmap_page_range(unsigned long addr, unsigned long end,
+                   phys_addr_t phys_addr, pgprot_t prot)
+{
+       int err;
+
+       err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
+                                ioremap_max_page_shift);
+       flush_cache_vmap(addr, end);
+       if (!err)
+               err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+                                              ioremap_max_page_shift);
+       return err;
+}
+
 int ioremap_page_range(unsigned long addr, unsigned long end,
                phys_addr_t phys_addr, pgprot_t prot)
 {
        struct vm_struct *area;
-       int err;
 
        area = find_vm_area((void *)addr);
        if (!area || !(area->flags & VM_IOREMAP)) {
@@ -322,13 +335,7 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
                          (long)area->addr + get_vm_area_size(area));
                return -ERANGE;
        }
-       err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
-                                ioremap_max_page_shift);
-       flush_cache_vmap(addr, end);
-       if (!err)
-               err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
-                                              ioremap_max_page_shift);
-       return err;
+       return vmap_page_range(addr, end, phys_addr, prot);
 }
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,