www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: introduce vma detached flag
authorSuren Baghdasaryan <surenb@google.com>
Mon, 27 Feb 2023 17:36:21 +0000 (09:36 -0800)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 28 Mar 2023 23:24:54 +0000 (16:24 -0700)
Per-vma locking mechanism will search for VMA under RCU protection and
then after locking it, has to ensure it was not removed from the VMA tree
after we found it.  To make this check efficient, introduce a
vma->detached flag to mark VMAs which were removed from the VMA tree.

Link: https://lkml.kernel.org/r/20230227173632.3292573-23-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/mm_types.h
mm/mmap.c

index d7ed0a36d750c4f0b37e3a155bc1f9db446ea50a..2bf46a1cb86f9b84bfabfd60548ddfd8f888a7e5 100644 (file)
@@ -710,6 +710,14 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
        VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
 }
 
+static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+{
+       /* When detaching vma should be write-locked */
+       if (detached)
+               vma_assert_write_locked(vma);
+       vma->detached = detached;
+}
+
 #else /* CONFIG_PER_VMA_LOCK */
 
 static inline void vma_init_lock(struct vm_area_struct *vma) {}
@@ -720,6 +728,8 @@ static inline void vma_start_write(struct vm_area_struct *vma) {}
 static inline bool vma_try_start_write(struct vm_area_struct *vma)
                { return true; }
 static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma,
+                                    bool detached) {}
 
 #endif /* CONFIG_PER_VMA_LOCK */
 
@@ -731,6 +741,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
        vma->vm_mm = mm;
        vma->vm_ops = &dummy_vm_ops;
        INIT_LIST_HEAD(&vma->anon_vma_chain);
+       vma_mark_detached(vma, false);
        vma_init_lock(vma);
 }
 
index 84a9c0717fe1710e3686e2f0d7138c951ee2c30d..bad9f456e88ac9eb81378078a263a75e8bbde053 100644 (file)
@@ -511,6 +511,9 @@ struct vm_area_struct {
 #ifdef CONFIG_PER_VMA_LOCK
        int vm_lock_seq;
        struct rw_semaphore lock;
+
+       /* Flag to indicate areas detached from the mm->mm_mt tree */
+       bool detached;
 #endif
 
        /*
index 22fb4fd80aa769bf2ad15a02c9988953baac33d8..40c71e3345ca75b7babc31194b944ab28d703388 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -599,6 +599,7 @@ static inline void vma_complete(struct vma_prepare *vp,
 
        if (vp->remove) {
 again:
+               vma_mark_detached(vp->remove, true);
                if (vp->file) {
                        uprobe_munmap(vp->remove, vp->remove->vm_start,
                                      vp->remove->vm_end);
@@ -2260,6 +2261,7 @@ static inline int munmap_sidetree(struct vm_area_struct *vma,
        if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
                return -ENOMEM;
 
+       vma_mark_detached(vma, true);
        if (vma->vm_flags & VM_LOCKED)
                vma->vm_mm->locked_vm -= vma_pages(vma);