www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
fs/proc/task_mmu: factor out proc_maps_private fields used by PROCMAP_QUERY
author: Suren Baghdasaryan <surenb@google.com>
Fri, 8 Aug 2025 15:28:48 +0000 (08:28 -0700)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:24:42 +0000 (17:24 -0700)
Refactor struct proc_maps_private so that the fields used by PROCMAP_QUERY
ioctl are moved into a separate structure. In the next patch this allows
ioctl to reuse some of the functions used for reading /proc/pid/maps
without using file->private_data. This prevents concurrent modification
of file->private_data members by ioctl and /proc/pid/maps readers.

The change is pure code refactoring and has no functional changes.

Link: https://lkml.kernel.org/r/20250808152850.2580887-3-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: "Paul E . McKenney" <paulmck@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Thomas Weißschuh <linux@weissschuh.net>
Cc: T.J. Mercier <tjmercier@google.com>
Cc: Ye Bin <yebin10@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/internal.h
fs/proc/task_mmu.c
fs/proc/task_nommu.c

index e737401d7383683297fd472dfe1bcd5aa1c45ad7..d1598576506c1e7b161e0967027ab361cfb0313f 100644 (file)
@@ -378,16 +378,21 @@ extern void proc_self_init(void);
  * task_[no]mmu.c
  */
 struct mem_size_stats;
-struct proc_maps_private {
-       struct inode *inode;
-       struct task_struct *task;
+
+struct proc_maps_locking_ctx {
        struct mm_struct *mm;
-       struct vma_iterator iter;
-       loff_t last_pos;
 #ifdef CONFIG_PER_VMA_LOCK
        bool mmap_locked;
        struct vm_area_struct *locked_vma;
 #endif
+};
+
+struct proc_maps_private {
+       struct inode *inode;
+       struct task_struct *task;
+       struct vma_iterator iter;
+       loff_t last_pos;
+       struct proc_maps_locking_ctx lock_ctx;
 #ifdef CONFIG_NUMA
        struct mempolicy *task_mempolicy;
 #endif
index 29cca0e6d0ff5a96c4c078400269e3ca05258650..c0968d293b61a5406d7cd0e9f920add13c564f7c 100644 (file)
@@ -132,18 +132,18 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
 
 #ifdef CONFIG_PER_VMA_LOCK
 
-static void unlock_vma(struct proc_maps_private *priv)
+static void unlock_ctx_vma(struct proc_maps_locking_ctx *lock_ctx)
 {
-       if (priv->locked_vma) {
-               vma_end_read(priv->locked_vma);
-               priv->locked_vma = NULL;
+       if (lock_ctx->locked_vma) {
+               vma_end_read(lock_ctx->locked_vma);
+               lock_ctx->locked_vma = NULL;
        }
 }
 
 static const struct seq_operations proc_pid_maps_op;
 
 static inline bool lock_vma_range(struct seq_file *m,
-                                 struct proc_maps_private *priv)
+                                 struct proc_maps_locking_ctx *lock_ctx)
 {
        /*
         * smaps and numa_maps perform page table walk, therefore require
@@ -151,25 +151,25 @@ static inline bool lock_vma_range(struct seq_file *m,
         * walking the vma tree under rcu read protection.
         */
        if (m->op != &proc_pid_maps_op) {
-               if (mmap_read_lock_killable(priv->mm))
+               if (mmap_read_lock_killable(lock_ctx->mm))
                        return false;
 
-               priv->mmap_locked = true;
+               lock_ctx->mmap_locked = true;
        } else {
                rcu_read_lock();
-               priv->locked_vma = NULL;
-               priv->mmap_locked = false;
+               lock_ctx->locked_vma = NULL;
+               lock_ctx->mmap_locked = false;
        }
 
        return true;
 }
 
-static inline void unlock_vma_range(struct proc_maps_private *priv)
+static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
 {
-       if (priv->mmap_locked) {
-               mmap_read_unlock(priv->mm);
+       if (lock_ctx->mmap_locked) {
+               mmap_read_unlock(lock_ctx->mm);
        } else {
-               unlock_vma(priv);
+               unlock_ctx_vma(lock_ctx);
                rcu_read_unlock();
        }
 }
@@ -177,15 +177,16 @@ static inline void unlock_vma_range(struct proc_maps_private *priv)
 static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
                                           loff_t last_pos)
 {
+       struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
        struct vm_area_struct *vma;
 
-       if (priv->mmap_locked)
+       if (lock_ctx->mmap_locked)
                return vma_next(&priv->iter);
 
-       unlock_vma(priv);
-       vma = lock_next_vma(priv->mm, &priv->iter, last_pos);
+       unlock_ctx_vma(lock_ctx);
+       vma = lock_next_vma(lock_ctx->mm, &priv->iter, last_pos);
        if (!IS_ERR_OR_NULL(vma))
-               priv->locked_vma = vma;
+               lock_ctx->locked_vma = vma;
 
        return vma;
 }
@@ -193,14 +194,16 @@ static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
 static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
                                         loff_t pos)
 {
-       if (priv->mmap_locked)
+       struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
+
+       if (lock_ctx->mmap_locked)
                return false;
 
        rcu_read_unlock();
-       mmap_read_lock(priv->mm);
+       mmap_read_lock(lock_ctx->mm);
        /* Reinitialize the iterator after taking mmap_lock */
        vma_iter_set(&priv->iter, pos);
-       priv->mmap_locked = true;
+       lock_ctx->mmap_locked = true;
 
        return true;
 }
@@ -208,14 +211,14 @@ static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
 #else /* CONFIG_PER_VMA_LOCK */
 
 static inline bool lock_vma_range(struct seq_file *m,
-                                 struct proc_maps_private *priv)
+                                 struct proc_maps_locking_ctx *lock_ctx)
 {
-       return mmap_read_lock_killable(priv->mm) == 0;
+       return mmap_read_lock_killable(lock_ctx->mm) == 0;
 }
 
-static inline void unlock_vma_range(struct proc_maps_private *priv)
+static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
 {
-       mmap_read_unlock(priv->mm);
+       mmap_read_unlock(lock_ctx->mm);
 }
 
 static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
@@ -258,7 +261,7 @@ retry:
                *ppos = vma->vm_end;
        } else {
                *ppos = SENTINEL_VMA_GATE;
-               vma = get_gate_vma(priv->mm);
+               vma = get_gate_vma(priv->lock_ctx.mm);
        }
 
        return vma;
@@ -267,6 +270,7 @@ retry:
 static void *m_start(struct seq_file *m, loff_t *ppos)
 {
        struct proc_maps_private *priv = m->private;
+       struct proc_maps_locking_ctx *lock_ctx;
        loff_t last_addr = *ppos;
        struct mm_struct *mm;
 
@@ -278,14 +282,15 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
        if (!priv->task)
                return ERR_PTR(-ESRCH);
 
-       mm = priv->mm;
+       lock_ctx = &priv->lock_ctx;
+       mm = lock_ctx->mm;
        if (!mm || !mmget_not_zero(mm)) {
                put_task_struct(priv->task);
                priv->task = NULL;
                return NULL;
        }
 
-       if (!lock_vma_range(m, priv)) {
+       if (!lock_vma_range(m, lock_ctx)) {
                mmput(mm);
                put_task_struct(priv->task);
                priv->task = NULL;
@@ -318,13 +323,13 @@ static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 static void m_stop(struct seq_file *m, void *v)
 {
        struct proc_maps_private *priv = m->private;
-       struct mm_struct *mm = priv->mm;
+       struct mm_struct *mm = priv->lock_ctx.mm;
 
        if (!priv->task)
                return;
 
        release_task_mempolicy(priv);
-       unlock_vma_range(priv);
+       unlock_vma_range(&priv->lock_ctx);
        mmput(mm);
        put_task_struct(priv->task);
        priv->task = NULL;
@@ -339,9 +344,9 @@ static int proc_maps_open(struct inode *inode, struct file *file,
                return -ENOMEM;
 
        priv->inode = inode;
-       priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
-       if (IS_ERR(priv->mm)) {
-               int err = PTR_ERR(priv->mm);
+       priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
+       if (IS_ERR(priv->lock_ctx.mm)) {
+               int err = PTR_ERR(priv->lock_ctx.mm);
 
                seq_release_private(inode, file);
                return err;
@@ -355,8 +360,8 @@ static int proc_map_release(struct inode *inode, struct file *file)
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;
 
-       if (priv->mm)
-               mmdrop(priv->mm);
+       if (priv->lock_ctx.mm)
+               mmdrop(priv->lock_ctx.mm);
 
        return seq_release_private(inode, file);
 }
@@ -610,7 +615,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
        if (!!karg.build_id_size != !!karg.build_id_addr)
                return -EINVAL;
 
-       mm = priv->mm;
+       mm = priv->lock_ctx.mm;
        if (!mm || !mmget_not_zero(mm))
                return -ESRCH;
 
@@ -1311,7 +1316,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 {
        struct proc_maps_private *priv = m->private;
        struct mem_size_stats mss = {};
-       struct mm_struct *mm = priv->mm;
+       struct mm_struct *mm = priv->lock_ctx.mm;
        struct vm_area_struct *vma;
        unsigned long vma_start = 0, last_vma_end = 0;
        int ret = 0;
@@ -1456,9 +1461,9 @@ static int smaps_rollup_open(struct inode *inode, struct file *file)
                goto out_free;
 
        priv->inode = inode;
-       priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
-       if (IS_ERR_OR_NULL(priv->mm)) {
-               ret = priv->mm ? PTR_ERR(priv->mm) : -ESRCH;
+       priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
+       if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
+               ret = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;
 
                single_release(inode, file);
                goto out_free;
@@ -1476,8 +1481,8 @@ static int smaps_rollup_release(struct inode *inode, struct file *file)
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;
 
-       if (priv->mm)
-               mmdrop(priv->mm);
+       if (priv->lock_ctx.mm)
+               mmdrop(priv->lock_ctx.mm);
 
        kfree(priv);
        return single_release(inode, file);
index 59bfd61d653aaead0d06ce601b3685c1c6c12992..d362919f4f688e390991f57a1dc010b82863965e 100644 (file)
@@ -204,7 +204,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
        if (!priv->task)
                return ERR_PTR(-ESRCH);
 
-       mm = priv->mm;
+       mm = priv->lock_ctx.mm;
        if (!mm || !mmget_not_zero(mm)) {
                put_task_struct(priv->task);
                priv->task = NULL;
@@ -226,7 +226,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 static void m_stop(struct seq_file *m, void *v)
 {
        struct proc_maps_private *priv = m->private;
-       struct mm_struct *mm = priv->mm;
+       struct mm_struct *mm = priv->lock_ctx.mm;
 
        if (!priv->task)
                return;
@@ -259,9 +259,9 @@ static int maps_open(struct inode *inode, struct file *file,
                return -ENOMEM;
 
        priv->inode = inode;
-       priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
-       if (IS_ERR_OR_NULL(priv->mm)) {
-               int err = priv->mm ? PTR_ERR(priv->mm) : -ESRCH;
+       priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
+       if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
+               int err = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;
 
                seq_release_private(inode, file);
                return err;
@@ -276,8 +276,8 @@ static int map_release(struct inode *inode, struct file *file)
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;
 
-       if (priv->mm)
-               mmdrop(priv->mm);
+       if (priv->lock_ctx.mm)
+               mmdrop(priv->lock_ctx.mm);
 
        return seq_release_private(inode, file);
 }