www.infradead.org Git - users/willy/xarray.git/commitdiff
mm/vma: expand mmap_region() munmap call
author Liam R. Howlett <Liam.Howlett@Oracle.com>
Fri, 30 Aug 2024 04:00:49 +0000 (00:00 -0400)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 4 Sep 2024 04:15:50 +0000 (21:15 -0700)
Open code the do_vmi_align_munmap() call so that it can be broken up later
in the series.

This requires exposing a few more vma operations.

Link: https://lkml.kernel.org/r/20240830040101.822209-10-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Bert Karwatzki <spasswolf@web.de>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: Jiri Olsa <olsajiri@gmail.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mmap.c
mm/vma.c
mm/vma.h

index ec72f05b05f227bfd42d523acf9ac2837ee1e0b8..84cb4b1df4a200f668a3f25da3ca726a1c780c7a 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1366,6 +1366,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        struct vm_area_struct *next, *prev, *merge;
        pgoff_t pglen = len >> PAGE_SHIFT;
        unsigned long charged = 0;
+       struct vma_munmap_struct vms;
+       struct ma_state mas_detach;
+       struct maple_tree mt_detach;
        unsigned long end = addr + len;
        unsigned long merge_start = addr, merge_end = end;
        bool writable_file_mapping = false;
@@ -1391,11 +1394,28 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        /* Find the first overlapping VMA */
        vma = vma_find(&vmi, end);
        if (vma) {
-               /* Unmap any existing mapping in the area */
-               error = do_vmi_align_munmap(&vmi, vma, mm, addr, end, uf, false);
+               mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+               mt_on_stack(mt_detach);
+               mas_init(&mas_detach, &mt_detach, /* addr = */ 0);
+               init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
+               /* Prepare to unmap any existing mapping in the area */
+               error = vms_gather_munmap_vmas(&vms, &mas_detach);
                if (error)
                        return error;
+
+               /* Remove any existing mappings from the vma tree */
+               if (vma_iter_clear_gfp(&vmi, addr, end, GFP_KERNEL))
+                       return -ENOMEM;
+
+               /* Unmap any existing mapping in the area */
+               vms_complete_munmap_vmas(&vms, &mas_detach);
+               next = vms.next;
+               prev = vms.prev;
+               vma_prev(&vmi);
                vma = NULL;
+       } else {
+               next = vma_next(&vmi);
+               prev = vma_prev(&vmi);
        }
 
        /*
@@ -1408,8 +1428,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                vm_flags |= VM_ACCOUNT;
        }
 
-       next = vma_next(&vmi);
-       prev = vma_prev(&vmi);
        if (vm_flags & VM_SPECIAL) {
                if (prev)
                        vma_iter_next_range(&vmi);
index 4e08c1654bdd05a39eff3445f606c9e750a28d68..fc425eb34bf795b8244a5710f9333585dde43aff 100644 (file)
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -80,33 +80,6 @@ static void init_multi_vma_prep(struct vma_prepare *vp,
 
 }
 
-/*
- * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
- * @vms: The vma munmap struct
- * @vmi: The vma iterator
- * @vma: The first vm_area_struct to munmap
- * @start: The aligned start address to munmap
- * @end: The aligned end address to munmap
- * @uf: The userfaultfd list_head
- * @unlock: Unlock after the operation.  Only unlocked on success
- */
-static inline void init_vma_munmap(struct vma_munmap_struct *vms,
-               struct vma_iterator *vmi, struct vm_area_struct *vma,
-               unsigned long start, unsigned long end, struct list_head *uf,
-               bool unlock)
-{
-       vms->vmi = vmi;
-       vms->vma = vma;
-       vms->mm = vma->vm_mm;
-       vms->start = start;
-       vms->end = end;
-       vms->unlock = unlock;
-       vms->uf = uf;
-       vms->vma_count = 0;
-       vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
-       vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
-}
-
 /*
  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
  * in front of (at a lower virtual address and file offset than) the vma.
@@ -698,7 +671,7 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
  * used for the munmap() and may downgrade the lock - if requested.  Everything
  * needed to be done once the vma maple tree is updated.
  */
-static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
+void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
                struct ma_state *mas_detach)
 {
        struct vm_area_struct *vma;
@@ -752,7 +725,7 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
  *
  * Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
  */
-static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
+int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
                struct ma_state *mas_detach)
 {
        struct vm_area_struct *next = NULL;
index cbf55e0e0c4fca741ba3a2db0ed6c0aef1b38dc4..e78b24d1cf83cee354523ba16f85a41e30c6c6d6 100644 (file)
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -78,6 +78,39 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
               unsigned long start, unsigned long end, pgoff_t pgoff);
 
+/*
+ * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
+ * @vms: The vma munmap struct
+ * @vmi: The vma iterator
+ * @vma: The first vm_area_struct to munmap
+ * @start: The aligned start address to munmap
+ * @end: The aligned end address to munmap
+ * @uf: The userfaultfd list_head
+ * @unlock: Unlock after the operation.  Only unlocked on success
+ */
+static inline void init_vma_munmap(struct vma_munmap_struct *vms,
+               struct vma_iterator *vmi, struct vm_area_struct *vma,
+               unsigned long start, unsigned long end, struct list_head *uf,
+               bool unlock)
+{
+       vms->vmi = vmi;
+       vms->vma = vma;
+       vms->mm = vma->vm_mm;
+       vms->start = start;
+       vms->end = end;
+       vms->unlock = unlock;
+       vms->uf = uf;
+       vms->vma_count = 0;
+       vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
+       vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
+}
+
+int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
+               struct ma_state *mas_detach);
+
+void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
+               struct ma_state *mas_detach);
+
 int
 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
                    struct mm_struct *mm, unsigned long start,