}
 
        INIT_LIST_HEAD(&context->vma_private_list);
+       mutex_init(&context->vma_private_list_mutex);
        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);
 
         * mlx5_ib_disassociate_ucontext().
         */
        mlx5_ib_vma_priv_data->vma = NULL;
+       mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
        list_del(&mlx5_ib_vma_priv_data->list);
+       mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
        kfree(mlx5_ib_vma_priv_data);
 }
 
                return -ENOMEM;
 
        vma_prv->vma = vma;
+       vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
        vma->vm_private_data = vma_prv;
        vma->vm_ops =  &mlx5_ib_vm_ops;
 
+       mutex_lock(&ctx->vma_private_list_mutex);
        list_add(&vma_prv->list, vma_head);
+       mutex_unlock(&ctx->vma_private_list_mutex);
 
        return 0;
 }
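
The two hunks above form the core of the fix: insertion into vma_private_list happens under the context's new mutex, and each list element carries a pointer back to that mutex because the vm_ops close callback (mlx5_ib_vma_close) receives only the VMA, not the ucontext. Below is a minimal user-space sketch of this back-pointer pattern; all names here (ctx, priv, ctx_add_priv, priv_close) are hypothetical, with pthread_mutex_t standing in for the kernel's struct mutex and a hand-rolled intrusive list standing in for list_head.

/* Hypothetical user-space sketch of the locking pattern in the patch above. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static void list_add_front(struct node *n, struct node *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}
static void list_unlink(struct node *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct ctx {
	struct node priv_list;            /* like vma_private_list */
	pthread_mutex_t priv_list_mutex;  /* like vma_private_list_mutex */
};

struct priv {
	struct node list;                 /* must stay the first member */
	/* Back-pointer to the owning context's lock: the close path gets
	 * only the element (as mlx5_ib_vma_close gets only the VMA), so
	 * the lock has to travel with the element. */
	pthread_mutex_t *list_mutex;
};

/* Mirrors mlx5_ib_set_vma_data(): stash the lock pointer, then insert
 * under the context's mutex. */
static int ctx_add_priv(struct ctx *c, struct priv *p)
{
	p->list_mutex = &c->priv_list_mutex;
	pthread_mutex_lock(&c->priv_list_mutex);
	list_add_front(&p->list, &c->priv_list);
	pthread_mutex_unlock(&c->priv_list_mutex);
	return 0;
}

/* Mirrors mlx5_ib_vma_close(): unlink and free using only the element. */
static void priv_close(struct priv *p)
{
	pthread_mutex_lock(p->list_mutex);
	list_unlink(&p->list);
	pthread_mutex_unlock(p->list_mutex);
	free(p);
}

int main(void)
{
	struct ctx c;
	list_init(&c.priv_list);
	pthread_mutex_init(&c.priv_list_mutex, NULL);

	struct priv *p = calloc(1, sizeof(*p));
	ctx_add_priv(&c, p);
	priv_close(p); /* element removes itself via the stored lock pointer */

	printf("list empty: %d\n", c.priv_list.next == &c.priv_list);
	pthread_mutex_destroy(&c.priv_list_mutex);
	return 0;
}

Storing just the mutex pointer, rather than a pointer to the whole context, keeps the per-VMA element small and documents exactly which lock guards the list.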
         * mlx5_ib_vma_close.
         */
        down_write(&owning_mm->mmap_sem);
+       mutex_lock(&context->vma_private_list_mutex);
        list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
                                 list) {
                vma = vma_private->vma;
                list_del(&vma_private->list);
                kfree(vma_private);
        }
+       mutex_unlock(&context->vma_private_list_mutex);
        up_write(&owning_mm->mmap_sem);
        mmput(owning_mm);
        put_task_struct(owning_process);
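
The disassociate hunk must use list_for_each_entry_safe() because every entry is freed while the list is being walked; the iterator's second cursor (n) saves the next node before the current one disappears. The whole walk also runs under the new mutex, so a racing mlx5_ib_vma_close() cannot unlink entries mid-iteration (the kernel code additionally holds mmap_sem, as seen above, to fence the vm_ops callbacks). Continuing the hypothetical sketch from the previous hunk, the same idea in user-space C:

/* Continuing the sketch above: tear down every remaining element, as
 * mlx5_ib_disassociate_ucontext() does. The next pointer is saved before
 * each node is freed, which is what list_for_each_entry_safe()'s second
 * cursor provides in the kernel code. */
static void ctx_teardown(struct ctx *c)
{
	pthread_mutex_lock(&c->priv_list_mutex);
	struct node *it = c->priv_list.next;
	while (it != &c->priv_list) {
		struct node *next = it->next;       /* save before freeing */
		struct priv *p = (struct priv *)it; /* list is the first member */
		list_unlink(&p->list);
		free(p);
		it = next;
	}
	pthread_mutex_unlock(&c->priv_list_mutex);
}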
 
 struct mlx5_ib_vma_private_data {
        struct list_head list;
        struct vm_area_struct *vma;
+       /* protect vma_private_list add/del */
+       struct mutex *vma_private_list_mutex;
 };
 
 struct mlx5_ib_ucontext {
        /* Transport Domain number */
        u32                     tdn;
        struct list_head        vma_private_list;
+       /* protect vma_private_list add/del */
+       struct mutex            vma_private_list_mutex;
 
        unsigned long           upd_xlt_page;
        /* protect ODP/KSM */