mm: mark vma as detached until it's added into vma tree
author     Suren Baghdasaryan <surenb@google.com>
           Thu, 13 Feb 2025 22:46:40 +0000 (14:46 -0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Tue, 4 Mar 2025 05:49:55 +0000 (21:49 -0800)
The current implementation does not set the detached flag when a VMA is first
allocated.  This does not represent the real state of the VMA, which is
detached until it is added into the mm's VMA tree.  Fix this by marking new
VMAs as detached and resetting the detached flag only after the VMA is added
into a tree.

Introduce vma_mark_attached() to make the API more readable and to simplify a
possible future cleanup in which vma->vm_mm might be used to indicate a
detached vma, at which point vma_mark_attached() will need an additional mm
parameter.
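
For readers skimming the hunks below, this is roughly the lifecycle the change
enforces.  The sketch is illustrative only: demo_vma_lifecycle() is a
hypothetical wrapper, error paths and vma iterator preallocation are omitted,
and only vma_mark_attached()/vma_mark_detached() plus the new vma_init() and
vma_iter_store() behavior come from this patch; the remaining helpers are
existing kernel APIs.

/* Illustrative sketch, not part of the patch. */
static int demo_vma_lifecycle(struct mm_struct *mm, struct vma_iterator *vmi,
			      unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);	/* vma_init() now leaves vma->detached == true */
	if (!vma)
		return -ENOMEM;

	vma->vm_start = start;
	vma->vm_end = end;

	/* Publish the VMA: vma_iter_store() now calls vma_mark_attached(). */
	vma_start_write(vma);
	vma_iter_store(vmi, vma);

	/* ... the VMA is attached; lock_vma_under_rcu() may now return it ... */

	/* Tear down: detaching requires the VMA to be write-locked. */
	vma_start_write(vma);
	vma_mark_detached(vma);		/* vma_assert_write_locked() checks this */

	return 0;
}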

Link: https://lkml.kernel.org/r/20250213224655.1680278-4-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Shivank Garg <shivankg@amd.com>
Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Klara Modin <klarasmodin@gmail.com>
Cc: Lokesh Gidra <lokeshgidra@google.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: "Paul E . McKenney" <paulmck@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sourav Panda <souravpanda@google.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Will Deacon <will@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
kernel/fork.c
mm/memory.c
mm/vma.c
mm/vma.h
tools/testing/vma/vma_internal.h

index 80bf3401e2d2cab7b11b317e6da8400f3df51650..974ac121cafa5d30304d38cfe0671b8a5fdb73fb 100644 (file)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -821,12 +821,21 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
                vma_assert_write_locked(vma);
 }
 
-static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+static inline void vma_mark_attached(struct vm_area_struct *vma)
+{
+       vma->detached = false;
+}
+
+static inline void vma_mark_detached(struct vm_area_struct *vma)
 {
        /* When detaching vma should be write-locked */
-       if (detached)
-               vma_assert_write_locked(vma);
-       vma->detached = detached;
+       vma_assert_write_locked(vma);
+       vma->detached = true;
+}
+
+static inline bool is_vma_detached(struct vm_area_struct *vma)
+{
+       return vma->detached;
 }
 
 static inline void release_fault_lock(struct vm_fault *vmf)
@@ -857,8 +866,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {}
 static inline void vma_start_write(struct vm_area_struct *vma) {}
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
                { mmap_assert_write_locked(vma->vm_mm); }
-static inline void vma_mark_detached(struct vm_area_struct *vma,
-                                    bool detached) {}
+static inline void vma_mark_attached(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma) {}
 
 static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
                unsigned long address)
@@ -891,7 +900,10 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
        vma->vm_mm = mm;
        vma->vm_ops = &vma_dummy_vm_ops;
        INIT_LIST_HEAD(&vma->anon_vma_chain);
-       vma_mark_detached(vma, false);
+#ifdef CONFIG_PER_VMA_LOCK
+       /* vma is not locked, can't use vma_mark_detached() */
+       vma->detached = true;
+#endif
        vma_numab_state_init(vma);
        vma_lock_init(vma);
 }
@@ -1086,6 +1098,7 @@ static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
        if (unlikely(mas_is_err(&vmi->mas)))
                return -ENOMEM;
 
+       vma_mark_attached(vma);
        return 0;
 }
 
index bdbabe73fb294e51f8389bb4d15ce401e825e6c2..5bf3e407c795b69fff462b5ba857fab372186ba8 100644 (file)
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -465,6 +465,10 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
        data_race(memcpy(new, orig, sizeof(*new)));
        vma_lock_init(new);
        INIT_LIST_HEAD(&new->anon_vma_chain);
+#ifdef CONFIG_PER_VMA_LOCK
+       /* vma is not locked, can't use vma_mark_detached() */
+       new->detached = true;
+#endif
        vma_numab_state_init(new);
        dup_anon_vma_name(orig, new);
 
index d2297e08f0f369f136895157319dcc751fe023b7..c810c1808cfcf583260b4959be8a217e5f6837fe 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6376,7 +6376,7 @@ retry:
                goto inval;
 
        /* Check if the VMA got isolated after we found it */
-       if (vma->detached) {
+       if (is_vma_detached(vma)) {
                vma_end_read(vma);
                count_vm_vma_lock_event(VMA_LOCK_MISS);
                /* The area was replaced with another one */
index 4b52737a1ff8da475204ddc774b782548d9a2491..0398383d4382627f02135e94f8d33fc18f707dce 100644 (file)
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -341,7 +341,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
 
        if (vp->remove) {
 again:
-               vma_mark_detached(vp->remove, true);
+               vma_mark_detached(vp->remove);
                if (vp->file) {
                        uprobe_munmap(vp->remove, vp->remove->vm_start,
                                      vp->remove->vm_end);
@@ -1238,7 +1238,7 @@ static void reattach_vmas(struct ma_state *mas_detach)
 
        mas_set(mas_detach, 0);
        mas_for_each(mas_detach, vma, ULONG_MAX)
-               vma_mark_detached(vma, false);
+               vma_mark_attached(vma);
 
        __mt_destroy(mas_detach->tree);
 }
@@ -1313,7 +1313,7 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
                if (error)
                        goto munmap_gather_failed;
 
-               vma_mark_detached(next, true);
+               vma_mark_detached(next);
                nrpages = vma_pages(next);
 
                vms->nr_pages += nrpages;
index e55e68abfbe3a8ca7780b5cb1057bcccaa5c561f..bffb56afce5f4275b5bf676f12358212bdcd0fb3 100644 (file)
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -205,6 +205,7 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
        if (unlikely(mas_is_err(&vmi->mas)))
                return -ENOMEM;
 
+       vma_mark_attached(vma);
        return 0;
 }
 
@@ -437,6 +438,7 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
 
        __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
        mas_store_prealloc(&vmi->mas, vma);
+       vma_mark_attached(vma);
 }
 
 static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
index 4506e6fb3c6fa95691240d81059c5f7cfe47ecc1..f93f7f74f97b28cb8f713a2eecd9b3b47763a7bd 100644 (file)
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -471,12 +471,16 @@ static inline void vma_lock_init(struct vm_area_struct *vma)
 }
 
 static inline void vma_assert_write_locked(struct vm_area_struct *);
-static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+static inline void vma_mark_attached(struct vm_area_struct *vma)
+{
+       vma->detached = false;
+}
+
+static inline void vma_mark_detached(struct vm_area_struct *vma)
 {
        /* When detaching vma should be write-locked */
-       if (detached)
-               vma_assert_write_locked(vma);
-       vma->detached = detached;
+       vma_assert_write_locked(vma);
+       vma->detached = true;
 }
 
 extern const struct vm_operations_struct vma_dummy_vm_ops;
@@ -489,7 +493,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
        vma->vm_mm = mm;
        vma->vm_ops = &vma_dummy_vm_ops;
        INIT_LIST_HEAD(&vma->anon_vma_chain);
-       vma_mark_detached(vma, false);
+       /* vma is not locked, can't use vma_mark_detached() */
+       vma->detached = true;
        vma_lock_init(vma);
 }
 
@@ -515,6 +520,8 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
        memcpy(new, orig, sizeof(*new));
        vma_lock_init(new);
        INIT_LIST_HEAD(&new->anon_vma_chain);
+       /* vma is not locked, can't use vma_mark_detached() */
+       new->detached = true;
 
        return new;
 }