www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mmap: Convert __vma_adjust() to use vma iterator
authorLiam R. Howlett <Liam.Howlett@Oracle.com>
Thu, 10 Nov 2022 19:32:51 +0000 (14:32 -0500)
committerLiam R. Howlett <Liam.Howlett@oracle.com>
Tue, 13 Dec 2022 21:03:39 +0000 (16:03 -0500)
Use the vma iterator internally for __vma_adjust().  Avoid using the
maple tree interface directly for type safety.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
include/linux/mm.h
mm/mmap.c

index cec0eccde16e2fca785676387e85e6c257fd43d2..017097508b7836951dce6e63a7d37f97e7f65dff 100644 (file)
@@ -2738,9 +2738,6 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
        bool *need_rmap_locks);
 extern void exit_mmap(struct mm_struct *);
 
-void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas);
-void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas);
-
 static inline int check_data_rlimit(unsigned long rlim,
                                    unsigned long new,
                                    unsigned long start,
index b8a4b2bca3d30887e5252054830251f5b6fa20cb..02e63d1e21e7c5ddcf1e0630f94408fa835a3c15 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -494,56 +494,6 @@ static void __vma_link_file(struct vm_area_struct *vma,
        flush_dcache_mmap_unlock(mapping);
 }
 
-/*
- * vma_mas_store() - Store a VMA in the maple tree.
- * @vma: The vm_area_struct
- * @mas: The maple state
- *
- * Efficient way to store a VMA in the maple tree when the @mas has already
- * walked to the correct location.
- *
- * Note: the end address is inclusive in the maple tree.
- */
-void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
-{
-       trace_vma_store(mas->tree, vma);
-       mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
-       mas_store_prealloc(mas, vma);
-}
-
-/*
- * vma_mas_remove() - Remove a VMA from the maple tree.
- * @vma: The vm_area_struct
- * @mas: The maple state
- *
- * Efficient way to remove a VMA from the maple tree when the @mas has already
- * been established and points to the correct location.
- * Note: the end address is inclusive in the maple tree.
- */
-void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
-{
-       trace_vma_mas_szero(mas->tree, vma->vm_start, vma->vm_end - 1);
-       mas->index = vma->vm_start;
-       mas->last = vma->vm_end - 1;
-       mas_store_prealloc(mas, NULL);
-}
-
-/*
- * vma_mas_szero() - Set a given range to zero.  Used when modifying a
- * vm_area_struct start or end.
- *
- * @mas: The maple tree ma_state
- * @start: The start address to zero
- * @end: The end address to zero.
- */
-static inline void vma_mas_szero(struct ma_state *mas, unsigned long start,
-                               unsigned long end)
-{
-       trace_vma_mas_szero(mas->tree, start, end - 1);
-       mas_set_range(mas, start, end - 1);
-       mas_store_prealloc(mas, NULL);
-}
-
 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
 {
        VMA_ITERATOR(vmi, mm, 0);
@@ -703,7 +653,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        bool vma_changed = false;
        long adjust_next = 0;
        int remove_next = 0;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
+       VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *exporter = NULL, *importer = NULL;
 
        if (next && !insert) {
@@ -788,7 +738,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                }
        }
 
-       if (mas_preallocate(&mas, vma, GFP_KERNEL))
+       if (vma_iter_prealloc(&vmi, vma))
                return -ENOMEM;
 
        vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
@@ -834,7 +784,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        if (start != vma->vm_start) {
                if ((vma->vm_start < start) &&
                    (!insert || (insert->vm_end != start))) {
-                       vma_mas_szero(&mas, vma->vm_start, start);
+                       vma_iter_clear(&vmi, vma->vm_start, start);
                        VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
                } else {
                        vma_changed = true;
@@ -844,8 +794,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        if (end != vma->vm_end) {
                if (vma->vm_end > end) {
                        if (!insert || (insert->vm_start != end)) {
-                               vma_mas_szero(&mas, end, vma->vm_end);
-                               mas_reset(&mas);
+                               vma_iter_clear(&vmi, end, vma->vm_end);
+                               vma_iter_set(&vmi, vma->vm_end);
                                VM_WARN_ON(insert &&
                                           insert->vm_end < vma->vm_end);
                        }
@@ -856,13 +806,13 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        }
 
        if (vma_changed)
-               vma_mas_store(vma, &mas);
+               vma_iter_store(&vmi, vma);
 
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
                next->vm_start += adjust_next;
                next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-               vma_mas_store(next, &mas);
+               vma_iter_store(&vmi, next);
        }
 
        if (file) {
@@ -882,8 +832,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
-               mas_reset(&mas);
-               vma_mas_store(insert, &mas);
+               vma_iter_store(&vmi, insert);
                mm->map_count++;
        }
 
@@ -929,7 +878,7 @@ again:
        if (insert && file)
                uprobe_mmap(insert);
 
-       mas_destroy(&mas);
+       vma_iter_free(&vmi);
        validate_mm(mm);
 
        return 0;
@@ -2057,7 +2006,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_end = address;
                                /* Overwrite old entry in mtree. */
-                               vma_mas_store(vma, &mas);
+                               mas_set_range(&mas, vma->vm_start, address - 1);
+                               mas_store_prealloc(&mas, vma);
                                anon_vma_interval_tree_post_update_vma(vma);
                                spin_unlock(&mm->page_table_lock);
 
@@ -2139,7 +2089,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
                                vma->vm_start = address;
                                vma->vm_pgoff -= grow;
                                /* Overwrite old entry in mtree. */
-                               vma_mas_store(vma, &mas);
+                               mas_set_range(&mas, address, vma->vm_end - 1);
+                               mas_store_prealloc(&mas, vma);
                                anon_vma_interval_tree_post_update_vma(vma);
                                spin_unlock(&mm->page_table_lock);