};
 
 #ifdef CONFIG_PER_VMA_LOCK
-static inline void vma_init_lock(struct vm_area_struct *vma)
-{
-       init_rwsem(&vma->lock);
-       vma->vm_lock_seq = -1;
-}
-
 /*
  * Try to read-lock a vma. The function is allowed to occasionally yield false
  * locked result to avoid performance overhead, in which case we fall back to
  * using mmap_lock. The function should never yield false unlocked result.
  */
 static inline bool vma_start_read(struct vm_area_struct *vma)
 {
        /* Check before locking. A race might cause false locked result. */
        if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
                return false;
 
-       if (unlikely(down_read_trylock(&vma->lock) == 0))
+       if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
                return false;
 
        /*
         * Overflow might produce false locked result.
         * False unlocked result is impossible because we modify and check
-        * vma->vm_lock_seq under vma->lock protection and mm->mm_lock_seq
+        * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
         * modification invalidates all existing locks.
         */
        if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) {
-               up_read(&vma->lock);
+               up_read(&vma->vm_lock->lock);
                return false;
        }
        return true;
 }
 
 static inline void vma_end_read(struct vm_area_struct *vma)
 {
        rcu_read_lock(); /* keeps vma alive till the end of up_read */
-       up_read(&vma->lock);
+       up_read(&vma->vm_lock->lock);
        rcu_read_unlock();
 }
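
For context, the sketch below shows roughly how a lock-free page-fault path is expected to pair vma_start_read() with vma_end_read(). It is illustrative only and not part of this patch: the helper name is made up, and the real lookup path (lock_vma_under_rcu() in mm/memory.c) performs additional checks that are omitted here.

/* Illustrative sketch only; a simplified version of what the fault path does. */
static struct vm_area_struct *fault_try_lock_vma(struct mm_struct *mm,
                                                 unsigned long address)
{
        MA_STATE(mas, &mm->mm_mt, address, address);
        struct vm_area_struct *vma;

        rcu_read_lock();
        vma = mas_walk(&mas);
        if (!vma || !vma_start_read(vma)) {
                /* No VMA, or a (possibly false) locked result: the caller
                 * falls back to taking mmap_lock. */
                vma = NULL;
                goto out;
        }
        /* vm_start/vm_end may have changed before the lock was acquired. */
        if (address < vma->vm_start || address >= vma->vm_end) {
                vma_end_read(vma);
                vma = NULL;
        }
out:
        rcu_read_unlock();
        return vma;     /* dropped later with vma_end_read() */
}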
 
 static inline void vma_start_write(struct vm_area_struct *vma)
 {
        int mm_lock_seq;
 
        if (__is_vma_write_locked(vma, &mm_lock_seq))
                return;
 
-       down_write(&vma->lock);
+       down_write(&vma->vm_lock->lock);
        vma->vm_lock_seq = mm_lock_seq;
-       up_write(&vma->lock);
+       up_write(&vma->vm_lock->lock);
 }
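
The write side is easy to misread: vma_start_write() holds the rwsem only long enough to publish the current mm_lock_seq, and the VMA then remains write-locked from a reader's point of view until mmap_write_unlock() advances mm->mm_lock_seq via vma_end_write_all(). A minimal sketch of the expected calling sequence (illustrative, not part of the patch):

/* Illustrative calling sequence only. */
static void example_modify_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
        mmap_write_lock(mm);    /* writers still serialize on mmap_lock */
        vma_start_write(vma);   /* subsequent vma_start_read() calls now fail */
        /* ... modify the VMA while page faults fall back to mmap_lock ... */
        mmap_write_unlock(mm);  /* vma_end_write_all(): bumps mm->mm_lock_seq */
}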
 
 static inline bool vma_try_start_write(struct vm_area_struct *vma)
 
 #endif /* CONFIG_PER_VMA_LOCK */
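
The helpers above dereference vma->vm_lock->lock, so the matching mm_types.h change (not included in this excerpt) has to pull the rwsem out into a separately allocated object and keep only a pointer plus the sequence number in vm_area_struct. A sketch of the layout they assume:

/* Layout assumed by the helpers above; the real definition lives in mm_types.h. */
struct vma_lock {
        struct rw_semaphore lock;
};

/*
 * Under CONFIG_PER_VMA_LOCK, vm_area_struct then carries
 *
 *      int vm_lock_seq;
 *      struct vma_lock *vm_lock;
 *
 * in place of the previously embedded struct rw_semaphore.
 */

Splitting the lock out this way also keeps the heavily written rwsem fields off the cache lines that hold the vm_area_struct fields read during page faults, which appears to be the main motivation for the change.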
 
+/*
+ * WARNING: vma_init does not initialize vma->vm_lock.
+ * Use vm_area_alloc()/vm_area_free() if vma needs locking.
+ */
 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 {
        static const struct vm_operations_struct dummy_vm_ops = {};
 
        memset(vma, 0, sizeof(*vma));
        vma->vm_mm = mm;
        vma->vm_ops = &dummy_vm_ops;
        INIT_LIST_HEAD(&vma->anon_vma_chain);
        vma_mark_detached(vma, false);
-       vma_init_lock(vma);
 }
 
 /* Use when VMA is not part of the VMA tree and needs no locking */
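
To make the vma_init() warning above concrete: a VMA that will live in the VMA tree and be locked must be obtained from vm_area_alloc() (or vm_area_dup()) and released with vm_area_free(), so that vm_lock is allocated and freed along with it. A minimal sketch of the intended pattern, with a hypothetical caller:

/* Illustrative only; error handling and tree insertion are elided. */
static int example_make_vma(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        vma = vm_area_alloc(mm);        /* also allocates vma->vm_lock */
        if (!vma)
                return -ENOMEM;

        /* ... set vm_start/vm_end/vm_flags and link into the VMA tree ... */

        vm_area_free(vma);              /* eventually frees vma->vm_lock too */
        return 0;
}

A stack VMA set up with vma_init(), by contrast, must never be passed to the vma_start_*()/vma_end_*() helpers, since its vm_lock pointer is left uninitialized.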
 
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
+#ifdef CONFIG_PER_VMA_LOCK
+
+/* SLAB cache for vm_area_struct.vm_lock */
+static struct kmem_cache *vma_lock_cachep;
+
+static bool vma_lock_alloc(struct vm_area_struct *vma)
+{
+       vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);
+       if (!vma->vm_lock)
+               return false;
+
+       init_rwsem(&vma->vm_lock->lock);
+       vma->vm_lock_seq = -1;
+
+       return true;
+}
+
+static inline void vma_lock_free(struct vm_area_struct *vma)
+{
+       kmem_cache_free(vma_lock_cachep, vma->vm_lock);
+}
+
+#else /* CONFIG_PER_VMA_LOCK */
+
+static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; }
+static inline void vma_lock_free(struct vm_area_struct *vma) {}
+
+#endif /* CONFIG_PER_VMA_LOCK */
+
 struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
 
        vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-       if (vma)
-               vma_init(vma, mm);
+       if (!vma)
+               return NULL;
+
+       vma_init(vma, mm);
+       if (!vma_lock_alloc(vma)) {
+               kmem_cache_free(vm_area_cachep, vma);
+               return NULL;
+       }
+
        return vma;
 }
 
 struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 {
        struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 
-       if (new) {
-               ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
-               ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
-               /*
-                * orig->shared.rb may be modified concurrently, but the clone
-                * will be reinitialized.
-                */
-               data_race(memcpy(new, orig, sizeof(*new)));
-               INIT_LIST_HEAD(&new->anon_vma_chain);
-               vma_init_lock(new);
-               dup_anon_vma_name(orig, new);
+       if (!new)
+               return NULL;
+
+       ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
+       ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
+       /*
+        * orig->shared.rb may be modified concurrently, but the clone
+        * will be reinitialized.
+        */
+       data_race(memcpy(new, orig, sizeof(*new)));
+       if (!vma_lock_alloc(new)) {
+               kmem_cache_free(vm_area_cachep, new);
+               return NULL;
        }
+       INIT_LIST_HEAD(&new->anon_vma_chain);
+       dup_anon_vma_name(orig, new);
+
        return new;
 }
 
 void __vm_area_free(struct vm_area_struct *vma)
 {
        free_anon_vma_name(vma);
+       vma_lock_free(vma);
        kmem_cache_free(vm_area_cachep, vma);
 }
 
 #ifdef CONFIG_PER_VMA_LOCK
 static void vm_area_free_rcu_cb(struct rcu_head *head)
 {
        struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
                                                  vm_rcu);
 
        /* The vma should not be locked while being destroyed. */
-       VM_BUG_ON_VMA(rwsem_is_locked(&vma->lock), vma);
+       VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma);
        __vm_area_free(vma);
 }
 #endif
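
For context, the RCU callback above is invoked from vm_area_free(), which this patch leaves untouched and which is therefore absent from the diff; assuming the surrounding code matches mainline of that time, it looks roughly like this:

void vm_area_free(struct vm_area_struct *vma)
{
#ifdef CONFIG_PER_VMA_LOCK
        /*
         * Free after an RCU grace period so that lock-free readers which
         * found this VMA under RCU are done touching it.
         */
        call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
#else
        __vm_area_free(vma);
#endif
}
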
        mm_cachep = kmem_cache_create_usercopy("mm_struct",
                        mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        offsetof(struct mm_struct, saved_auxv),
                        sizeof_field(struct mm_struct, saved_auxv),
                        NULL);
 
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
+#ifdef CONFIG_PER_VMA_LOCK
+       vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);
+#endif
        mmap_init();
        nsproxy_cache_init();
 }