#ifdef CONFIG_PER_VMA_LOCK
-static void unlock_vma(struct proc_maps_private *priv)
+static void unlock_ctx_vma(struct proc_maps_locking_ctx *lock_ctx)
{
- if (priv->locked_vma) {
- vma_end_read(priv->locked_vma);
- priv->locked_vma = NULL;
+ if (lock_ctx->locked_vma) {
+ vma_end_read(lock_ctx->locked_vma);
+ lock_ctx->locked_vma = NULL;
}
}
static const struct seq_operations proc_pid_maps_op;
static inline bool lock_vma_range(struct seq_file *m,
- struct proc_maps_private *priv)
+ struct proc_maps_locking_ctx *lock_ctx)
{
/*
* smaps and numa_maps perform page table walk, therefore require
* mmap_lock but maps can be read with locking just the vma and
* walking the vma tree under rcu read protection.
*/
if (m->op != &proc_pid_maps_op) {
- if (mmap_read_lock_killable(priv->mm))
+ if (mmap_read_lock_killable(lock_ctx->mm))
return false;
- priv->mmap_locked = true;
+ lock_ctx->mmap_locked = true;
} else {
rcu_read_lock();
- priv->locked_vma = NULL;
- priv->mmap_locked = false;
+ lock_ctx->locked_vma = NULL;
+ lock_ctx->mmap_locked = false;
}
return true;
}
-static inline void unlock_vma_range(struct proc_maps_private *priv)
+static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
{
- if (priv->mmap_locked) {
- mmap_read_unlock(priv->mm);
+ if (lock_ctx->mmap_locked) {
+ mmap_read_unlock(lock_ctx->mm);
} else {
- unlock_vma(priv);
+ unlock_ctx_vma(lock_ctx);
rcu_read_unlock();
}
}
static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
loff_t last_pos)
{
+ struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
struct vm_area_struct *vma;
- if (priv->mmap_locked)
+ if (lock_ctx->mmap_locked)
return vma_next(&priv->iter);
- unlock_vma(priv);
- vma = lock_next_vma(priv->mm, &priv->iter, last_pos);
+ unlock_ctx_vma(lock_ctx);
+ vma = lock_next_vma(lock_ctx->mm, &priv->iter, last_pos);
if (!IS_ERR_OR_NULL(vma))
- priv->locked_vma = vma;
+ lock_ctx->locked_vma = vma;
return vma;
}
static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
loff_t pos)
{
- if (priv->mmap_locked)
+ struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
+
+ if (lock_ctx->mmap_locked)
return false;
rcu_read_unlock();
- mmap_read_lock(priv->mm);
+ mmap_read_lock(lock_ctx->mm);
/* Reinitialize the iterator after taking mmap_lock */
vma_iter_set(&priv->iter, pos);
- priv->mmap_locked = true;
+ lock_ctx->mmap_locked = true;
return true;
}
#else /* CONFIG_PER_VMA_LOCK */
static inline bool lock_vma_range(struct seq_file *m,
- struct proc_maps_private *priv)
+ struct proc_maps_locking_ctx *lock_ctx)
{
- return mmap_read_lock_killable(priv->mm) == 0;
+ return mmap_read_lock_killable(lock_ctx->mm) == 0;
}
-static inline void unlock_vma_range(struct proc_maps_private *priv)
+static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
{
- mmap_read_unlock(priv->mm);
+ mmap_read_unlock(lock_ctx->mm);
}
static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
*ppos = vma->vm_end;
} else {
*ppos = SENTINEL_VMA_GATE;
- vma = get_gate_vma(priv->mm);
+ vma = get_gate_vma(priv->lock_ctx.mm);
}
return vma;
static void *m_start(struct seq_file *m, loff_t *ppos)
{
struct proc_maps_private *priv = m->private;
+ struct proc_maps_locking_ctx *lock_ctx;
loff_t last_addr = *ppos;
struct mm_struct *mm;
if (!priv->task)
return ERR_PTR(-ESRCH);
- mm = priv->mm;
+ lock_ctx = &priv->lock_ctx;
+ mm = lock_ctx->mm;
if (!mm || !mmget_not_zero(mm)) {
put_task_struct(priv->task);
priv->task = NULL;
return NULL;
}
- if (!lock_vma_range(m, priv)) {
+ if (!lock_vma_range(m, lock_ctx)) {
mmput(mm);
put_task_struct(priv->task);
priv->task = NULL;
static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
- struct mm_struct *mm = priv->mm;
+ struct mm_struct *mm = priv->lock_ctx.mm;
if (!priv->task)
return;
release_task_mempolicy(priv);
- unlock_vma_range(priv);
+ unlock_vma_range(&priv->lock_ctx);
mmput(mm);
put_task_struct(priv->task);
priv->task = NULL;
return -ENOMEM;
priv->inode = inode;
- priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
- if (IS_ERR(priv->mm)) {
- int err = PTR_ERR(priv->mm);
+ priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
+ if (IS_ERR(priv->lock_ctx.mm)) {
+ int err = PTR_ERR(priv->lock_ctx.mm);
seq_release_private(inode, file);
return err;
struct seq_file *seq = file->private_data;
struct proc_maps_private *priv = seq->private;
- if (priv->mm)
- mmdrop(priv->mm);
+ if (priv->lock_ctx.mm)
+ mmdrop(priv->lock_ctx.mm);
return seq_release_private(inode, file);
}
if (!!karg.build_id_size != !!karg.build_id_addr)
return -EINVAL;
- mm = priv->mm;
+ mm = priv->lock_ctx.mm;
if (!mm || !mmget_not_zero(mm))
return -ESRCH;
{
struct proc_maps_private *priv = m->private;
struct mem_size_stats mss = {};
- struct mm_struct *mm = priv->mm;
+ struct mm_struct *mm = priv->lock_ctx.mm;
struct vm_area_struct *vma;
unsigned long vma_start = 0, last_vma_end = 0;
int ret = 0;
goto out_free;
priv->inode = inode;
- priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
- if (IS_ERR_OR_NULL(priv->mm)) {
- ret = priv->mm ? PTR_ERR(priv->mm) : -ESRCH;
+ priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
+ if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
+ ret = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;
single_release(inode, file);
goto out_free;
struct seq_file *seq = file->private_data;
struct proc_maps_private *priv = seq->private;
- if (priv->mm)
- mmdrop(priv->mm);
+ if (priv->lock_ctx.mm)
+ mmdrop(priv->lock_ctx.mm);
kfree(priv);
return single_release(inode, file);
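
The hunks above dereference lock_ctx->mm, lock_ctx->locked_vma and lock_ctx->mmap_locked, and access the context as an embedded member via priv->lock_ctx, but the new type itself is not shown here. A minimal sketch of what the accompanying header change is assumed to look like, inferred only from those accesses (field placement and #ifdef coverage in the real patch may differ):

struct proc_maps_locking_ctx {
	struct mm_struct *mm;
#ifdef CONFIG_PER_VMA_LOCK
	struct vm_area_struct *locked_vma;	/* vma currently read-locked, released via vma_end_read() */
	bool mmap_locked;			/* set when mmap_read_lock() was taken instead of a per-vma lock */
#endif
};

struct proc_maps_private {
	/* ... existing fields (task, inode, iter, ...) unchanged ... */
	struct proc_maps_locking_ctx lock_ctx;	/* replaces the old bare struct mm_struct *mm */
};

With this layout, helpers that only deal with locking (lock_vma_range(), unlock_vma_range(), unlock_ctx_vma()) can be handed just &priv->lock_ctx, while iterator users keep taking the full proc_maps_private.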