mm/mmap: Change munmap to use vma_munmap_struct() for accounting and surrounding vmas

author		Liam R. Howlett <Liam.Howlett@oracle.com>
		Wed, 17 Apr 2024 15:03:05 +0000 (11:03 -0400)
committer	Liam R. Howlett <Liam.Howlett@oracle.com>
		Fri, 7 Jun 2024 19:42:15 +0000 (15:42 -0400)

Clean up the code by changing the munmap operation to use a structure
for the accounting and munmap variables.

Since remove_mt() is only called in one location and its contents will be
reduced to almost nothing, the remains of the function can be moved into
vms_complete_munmap_vmas().
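
For context, the resulting call flow in the caller looks roughly like the
following (a minimal sketch distilled from the diff below; the detach-tree
setup and error unwinding are omitted, so treat names and argument order as
illustrative rather than literal kernel code):

	struct vma_munmap_struct vms;
	MA_STATE(mas_detach, &mt_detach, 0, 0);	/* mt_detach: tree for detached vmas */

	/* Record the range and zero all counters, including the new
	 * nr_accounted and exec/stack/data_vm fields */
	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);

	/* Split the edge vmas, detach the range into mas_detach, and gather
	 * nr_pages, locked_vm, nr_accounted and exec/stack/data_vm, plus the
	 * surrounding prev/next vmas, in a single pass */
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		return error;

	/* Unmap the region, apply the gathered statistics once and free the
	 * detached vmas */
	vms_complete_munmap_vmas(&vms, &mas_detach);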

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/internal.h
mm/mmap.c

index 6ebf77853d68cdbfc55971c99d91b015af5aae92..8c02ebf5736c32ee511f1c993bf49348541c7e1c 100644 (file)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1435,12 +1435,18 @@ struct vma_munmap_struct {
        struct vma_iterator *vmi;
        struct mm_struct *mm;
        struct vm_area_struct *vma;     /* The first vma to munmap */
+       struct vm_area_struct *next;    /* vma after the munmap area */
+       struct vm_area_struct *prev;    /* vma before the munmap area */
        struct list_head *uf;           /* Userfaultfd list_head */
        unsigned long start;            /* Aligned start addr */
        unsigned long end;              /* Aligned end addr */
        int vma_count;                  /* Number of vmas that will be removed */
        unsigned long nr_pages;         /* Number of pages being removed */
        unsigned long locked_vm;        /* Number of locked pages */
+       unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
+       unsigned long exec_vm;
+       unsigned long stack_vm;
+       unsigned long data_vm;
        bool unlock;                    /* Unlock after the munmap */
 };
 
index 374cf89f0c30a0aa8a9e661cf939f672afab4122..ea3edfa8b22cc4e927b23ee2cb004bf448699e95 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -482,7 +482,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
        vms->unlock = unlock;
        vms->uf = uf;
        vms->vma_count = 0;
-       vms->nr_pages = vms->locked_vm = 0;
+       vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
+       vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
 }
 
 /*
@@ -2347,30 +2348,6 @@ success:
        return vma;
 }
 
-/*
- * Ok - we have the memory areas we should free on a maple tree so release them,
- * and do the vma updates.
- *
- * Called with the mm semaphore held.
- */
-static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-{
-       unsigned long nr_accounted = 0;
-       struct vm_area_struct *vma;
-
-       /* Update high watermark before we lower total_vm */
-       update_hiwater_vm(mm);
-       mas_for_each(mas, vma, ULONG_MAX) {
-               long nrpages = vma_pages(vma);
-
-               if (vma->vm_flags & VM_ACCOUNT)
-                       nr_accounted += nrpages;
-               vm_stat_account(mm, vma->vm_flags, -nrpages);
-               remove_vma(vma, false);
-       }
-       vm_unacct_memory(nr_accounted);
-}
-
 /*
  * Get rid of page table information in the indicated region.
  *
@@ -2589,16 +2566,20 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
        __mt_destroy(mas_detach->tree);
 }
 
+static inline void vms_vm_stat_account(struct vma_munmap_struct *vms);
 /*
  * vms_complete_munmap_vmas() - Finish the munmap() operation
  * @vms: The vma munmap struct
  * @mas_detach: The maple state of the detached vmas
+ *
+ * This function updates the mm_struct, unmaps the region, frees the resources
+ * used for the munmap() and may downgrade the lock - if requested.  Everything
+ * needed to be done once the vma maple tree is updated.
  */
-
 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
                struct ma_state *mas_detach)
 {
-       struct vm_area_struct *prev, *next;
+       struct vm_area_struct *vma;
        struct mm_struct *mm;
 
        mm = vms->mm;
@@ -2607,21 +2588,21 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
        if (vms->unlock)
                mmap_write_downgrade(mm);
 
-       prev = vma_iter_prev_range(vms->vmi);
-       next = vma_next(vms->vmi);
-       if (next)
-               vma_iter_prev_range(vms->vmi);
-
        /*
         * We can free page tables without write-locking mmap_lock because VMAs
         * were isolated before we downgraded mmap_lock.
         */
        mas_set(mas_detach, 1);
-       unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
-                    vms->vma_count, !vms->unlock);
-       /* Statistics and freeing VMAs */
+       unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
+                    vms->start, vms->end, vms->vma_count, !vms->unlock);
+       /* Update high watermark before we lower total_vm */
+       update_hiwater_vm(mm);
+       vms_vm_stat_account(vms);
        mas_set(mas_detach, 0);
-       remove_mt(mm, mas_detach);
+       mas_for_each(mas_detach, vma, ULONG_MAX)
+               remove_vma(vma, false);
+
+       vm_unacct_memory(vms->nr_accounted);
        validate_mm(mm);
        if (vms->unlock)
                mmap_read_unlock(mm);
@@ -2669,13 +2650,14 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
                if (error)
                        goto start_split_failed;
        }
+       vms->prev = vma_prev(vms->vmi);
 
        /*
         * Detach a range of VMAs from the mm. Using next as a temp variable as
         * it is always overwritten.
         */
-       next = vms->vma;
-       do {
+       for_each_vma_range(*(vms->vmi), next, vms->end) {
+               long nrpages;
                /* Does it split the end? */
                if (next->vm_end > vms->end) {
                        error = __split_vma(vms->vmi, next, vms->end, 0);
@@ -2684,8 +2666,21 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
                }
                vma_start_write(next);
                mas_set(mas_detach, vms->vma_count++);
+               nrpages = vma_pages(next);
+
+               vms->nr_pages += nrpages;
                if (next->vm_flags & VM_LOCKED)
-                       vms->locked_vm += vma_pages(next);
+                       vms->locked_vm += nrpages;
+
+               if (next->vm_flags & VM_ACCOUNT)
+                       vms->nr_accounted += nrpages;
+
+               if (is_exec_mapping(next->vm_flags))
+                       vms->exec_vm += nrpages;
+               else if (is_stack_mapping(next->vm_flags))
+                       vms->stack_vm += nrpages;
+               else if (is_data_mapping(next->vm_flags))
+                       vms->data_vm += nrpages;
 
                error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
                if (error)
@@ -2711,7 +2706,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
                BUG_ON(next->vm_start < vms->start);
                BUG_ON(next->vm_start > vms->end);
 #endif
-       } for_each_vma_range(*(vms->vmi), next, vms->end);
+       }
+
+       vms->next = vma_next(vms->vmi);
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
        /* Make sure no VMAs are about to be lost. */
@@ -3632,6 +3629,17 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
                mm->data_vm += npages;
 }
 
+/* Accounting for munmap */
+static inline void vms_vm_stat_account(struct vma_munmap_struct *vms)
+{
+       struct mm_struct *mm = vms->mm;
+
+       WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
+       mm->exec_vm -= vms->exec_vm;
+       mm->stack_vm -= vms->stack_vm;
+       mm->data_vm -= vms->data_vm;
+}
+
 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
 
 /*
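
Taken together, the hunks above keep the counter math identical and only move
where it happens: the per-vma loop in remove_mt() becomes one bulk update from
the totals gathered earlier. A side-by-side sketch, condensed from the removed
and added lines above (illustrative, not literal code):

	/* Before: remove_mt() walked the detached tree and adjusted the
	 * statistics one vma at a time */
	mas_for_each(mas, vma, ULONG_MAX) {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vm_stat_account(mm, vma->vm_flags, -vma_pages(vma));
		remove_vma(vma, false);
	}
	vm_unacct_memory(nr_accounted);

	/* After: the totals were gathered in vms_gather_munmap_vmas(), so
	 * vms_complete_munmap_vmas() applies them once */
	vms_vm_stat_account(vms);		/* total_vm, exec/stack/data_vm */
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, false);
	vm_unacct_memory(vms->nr_accounted);	/* VM_ACCOUNT pages */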