vma);
}
+/*
+ * Look up and read-lock the VMA covering @address under RCU; returns NULL if
+ * no stable VMA could be locked (defined in mm/memory.c).
+ */
+struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address);
+
#else /* CONFIG_PER_VMA_LOCK */
/* Per-VMA locking disabled: lock initialization is a no-op. */
static inline void vma_init_lock(struct vm_area_struct *vma) {}
}
EXPORT_SYMBOL_GPL(handle_mm_fault);
+#ifdef CONFIG_PER_VMA_LOCK
+/*
+ * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
+ * stable and not isolated. If the VMA is not found or is being modified the
+ * function returns NULL.
+ *
+ * On success the VMA is returned with its per-VMA read lock held (acquired
+ * via vma_read_trylock()); the caller is responsible for releasing it with
+ * vma_read_unlock(). RCU protection is dropped before returning in all paths.
+ */
+struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address)
+{
+	/* Maple-tree iterator positioned at @address in this mm's VMA tree. */
+	MA_STATE(mas, &mm->mm_mt, address, address);
+	struct vm_area_struct *vma, *validate;
+
+	/* RCU keeps the looked-up VMA object from being freed while unlocked. */
+	rcu_read_lock();
+	vma = mas_walk(&mas);
+retry:
+	if (!vma)
+		goto inval;
+
+	/*
+	 * Only anonymous VMAs are handled on this path; file-backed faults
+	 * fall back to the mmap_lock path (handled by the caller on NULL).
+	 */
+	if (!vma_is_anonymous(vma))
+		goto inval;
+
+	/* trylock fails if the VMA is currently write-locked (being modified). */
+	if (!vma_read_trylock(vma)) {
+		count_vm_vma_lock_event(VMA_LOCK_ABORT);
+		goto inval;
+	}
+
+	/* Check if the VMA got isolated after we found it */
+	mas.index = address;
+	validate = mas_walk(&mas);
+	if (validate != vma) {
+		vma_read_unlock(vma);
+		count_vm_vma_lock_event(VMA_LOCK_MISS);
+		/* The area was replaced with another one. */
+		vma = validate;
+		/*
+		 * NOTE(review): this retry loop is unbounded if the tree keeps
+		 * changing under us; presumably acceptable because replacement
+		 * is rare, but confirm there is no livelock concern. Also note
+		 * the VMA's address range is not re-checked against @address
+		 * after locking — verify callers tolerate that, or that the
+		 * revalidating mas_walk() guarantees coverage of @address.
+		 */
+		goto retry;
+	}
+
+	/* Success: VMA is read-locked and revalidated; RCU no longer needed. */
+	rcu_read_unlock();
+	return vma;
+inval:
+	rcu_read_unlock();
+	return NULL;
+}
+#endif /* CONFIG_PER_VMA_LOCK */
+
+
#ifndef __PAGETABLE_P4D_FOLDED
/*
* Allocate p4d page table.