};
#ifdef CONFIG_PER_VMA_LOCK
-static inline void vma_init_lock(struct vm_area_struct *vma)
-{
- init_rwsem(&vma->lock);
- vma->vm_lock_seq = -1;
-}
+
+void vma_lock_init(struct vm_area_struct *vma);
+void vma_lock_free(struct vm_area_struct *vma);
static inline void vma_write_lock(struct vm_area_struct *vma)
{
	int mm_lock_seq;

	mmap_assert_write_locked(vma->vm_mm);
	mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
	if (vma->vm_lock_seq == mm_lock_seq)
		return;
- down_write(&vma->lock);
+ down_write(vma->lock);
vma->vm_lock_seq = mm_lock_seq;
- up_write(&vma->lock);
+ up_write(vma->lock);
}
+/*
+ * Try to read-lock a vma. The function is allowed to occasionally yield a
+ * false locked result to avoid performance overhead, in which case we fall
+ * back to using mmap_lock. The function should never yield a false unlocked
+ * result.
+ */
static inline bool vma_read_trylock(struct vm_area_struct *vma)
{
- if (unlikely(down_read_trylock(&vma->lock) == 0))
+ /* Check before locking. A race might cause a false locked result. */
+ if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
+ return false;
+
+ if (unlikely(down_read_trylock(vma->lock) == 0))
return false;
/*
- * Overflow might produce false locked result but it's not critical
- * because we just fall back to using mmap_lock in such case.
- * False unlocked result is critical but is impossible because we
- * modify and check vma->vm_lock_seq under vma->lock protection and
- * mm->mm_lock_seq modification invalidates all existing locks.
+ * Overflow might produce a false locked result.
+ * A false unlocked result is impossible because we modify and check
+ * vma->vm_lock_seq under vma->lock protection and mm->mm_lock_seq
+ * modification invalidates all existing locks.
*/
- if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq)) {
- up_read(&vma->lock);
+ if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) {
+ up_read(vma->lock);
return false;
}
	return true;
}

static inline void vma_read_unlock(struct vm_area_struct *vma)
{
- up_read(&vma->lock);
+ up_read(vma->lock);
}

static inline void vma_assert_locked(struct vm_area_struct *vma)
{
- lockdep_assert_held(&vma->lock);
- VM_BUG_ON_VMA(!rwsem_is_locked(&vma->lock), vma);
+ lockdep_assert_held(vma->lock);
+ VM_BUG_ON_VMA(!rwsem_is_locked(vma->lock), vma);
}
@@ ... @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
static inline void vma_assert_no_reader(struct vm_area_struct *vma)
{
- VM_BUG_ON_VMA(rwsem_is_locked(&vma->lock) &&
+ VM_BUG_ON_VMA(rwsem_is_locked(vma->lock) &&
vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq),
vma);
}
#else /* CONFIG_PER_VMA_LOCK */
-static inline void vma_init_lock(struct vm_area_struct *vma) {}
+static inline void vma_lock_init(struct vm_area_struct *vma) {}
+static inline void vma_lock_free(struct vm_area_struct *vma) {}
static inline void vma_write_lock(struct vm_area_struct *vma) {}
static inline bool vma_read_trylock(struct vm_area_struct *vma)
{ return false; }
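
For orientation, a caller is expected to combine these helpers with an RCU
lookup roughly as in the sketch below. This is an editorial illustration, not
part of the patch: lock_vma_for_fault() and find_vma_under_rcu() are
hypothetical names, while vma_read_trylock()/vma_read_unlock() are the helpers
defined above.

static struct vm_area_struct *lock_vma_for_fault(struct mm_struct *mm,
						 unsigned long addr)
{
	struct vm_area_struct *vma;

	rcu_read_lock();
	vma = find_vma_under_rcu(mm, addr);	/* assumed RCU-safe lookup */
	if (vma && !vma_read_trylock(vma))
		vma = NULL;
	/*
	 * vm_start/vm_end may have changed before the lock was taken;
	 * recheck and let the caller retry under mmap_lock.
	 */
	if (vma && (addr < vma->vm_start || addr >= vma->vm_end)) {
		vma_read_unlock(vma);
		vma = NULL;
	}
	rcu_read_unlock();
	return vma;	/* NULL: fall back to the mmap_lock path */
}

Note that an occasional false locked result from vma_read_trylock() only makes
this return NULL, which is exactly the mmap_lock fallback the comment above
describes; a false unlocked result would instead let a fault race with munmap,
hence the stronger guarantee.
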
@@ ... @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
	vma->vm_mm = mm;
vma->vm_ops = &dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma_init_lock(vma);
+ vma_lock_init(vma);
}
/* Use when VMA is not part of the VMA tree and needs no locking */
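
The bodies of vma_lock_init() and vma_lock_free() are outside this excerpt. A
minimal sketch of what the new declarations imply, assuming vma->lock is now a
pointer to a separately allocated rw_semaphore; the kmalloc()/BUG_ON() pairing
is an assumption here, and real code would more plausibly use a dedicated
kmem_cache and propagate allocation failure:

/* Editorial sketch; mirrors the removed inline vma_init_lock() above. */
void vma_lock_init(struct vm_area_struct *vma)
{
	vma->lock = kmalloc(sizeof(*vma->lock), GFP_KERNEL);
	BUG_ON(!vma->lock);	/* real code would propagate the failure */
	init_rwsem(vma->lock);
	vma->vm_lock_seq = -1;
}

void vma_lock_free(struct vm_area_struct *vma)
{
	kfree(vma->lock);
	vma->lock = NULL;
}

Moving the rwsem behind a pointer is what turns initialization into an
allocation and requires the matching vma_lock_free(); it also allows the lock
to be managed independently of the vm_area_struct's own lifetime, though this
excerpt does not show how the series uses that.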