 #include <linux/mmu_context.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
-#include <linux/rcupdate.h>
 #include <linux/poll.h>
 #include <linux/file.h>
 #include <linux/highmem.h>
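
With dev->memory no longer an __rcu pointer, nothing in this file uses the
RCU API, so the rcupdate.h include can be dropped. The locking scheme the
patch converges on can be modeled in plain userspace C: each reader owns a
mutex plus a cached pointer to the shared table, and only dereferences that
pointer while holding its own mutex. A minimal sketch of the reader side;
struct table and struct reader are illustrative stand-ins (for struct
vhost_memory and struct vhost_virtqueue), not vhost names:

/* Illustrative userspace analogue of the post-patch scheme. */
#include <pthread.h>

struct table {
        int nregions;                   /* placeholder payload */
};

struct reader {
        pthread_mutex_t mutex;          /* plays the role of vq->mutex */
        struct table *table;            /* plays the role of vq->memory */
};

/* Reader side: snapshot the cached pointer under our own mutex and use
 * only that snapshot, exactly as the VQ handlers below do. */
static void reader_work(struct reader *r)
{
        pthread_mutex_lock(&r->mutex);
        struct table *t = r->table;     /* stable until we unlock */
        (void)t;                        /* ... translate addresses via t ... */
        pthread_mutex_unlock(&r->mutex);
}
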
        vq->call_ctx = NULL;
        vq->call = NULL;
        vq->log_ctx = NULL;
+       vq->memory = NULL;
 }
 
 static int vhost_worker(void *data)
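
vq->memory is the new per-virtqueue cache of the device's memory table,
cleared here alongside the other per-VQ state. The header side of the change
is not part of this excerpt; roughly, and with unrelated fields elided, it
amounts to:

/* Sketch of the matching vhost.h change (not shown in this excerpt):
 * the device-level pointer loses its __rcu annotation and each
 * virtqueue gains a cached copy, updated only under vq->mutex. */
struct vhost_virtqueue {
        /* ... */
        struct vhost_memory *memory;    /* cached, set under vq->mutex */
};

struct vhost_dev {
        struct vhost_memory *memory;    /* was: struct vhost_memory __rcu * */
        /* ... */
};
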
 /* Caller should have device mutex */
 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
 {
+       int i;
+
        vhost_dev_cleanup(dev, true);
 
        /* Restore memory to default empty mapping. */
        memory->nregions = 0;
-       RCU_INIT_POINTER(dev->memory, memory);
+       dev->memory = memory;
+       /* We don't need VQ locks below since vhost_dev_cleanup makes sure
+        * VQs aren't running.
+        */
+       for (i = 0; i < dev->nvqs; ++i)
+               dev->vqs[i]->memory = memory;
 }
 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 
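The loop above stores to every vq->memory without taking the VQ mutexes;
that is safe only because vhost_dev_cleanup() has already stopped the
worker, so no handler can run concurrently. In the userspace analogue
(struct dev is another illustrative stand-in):

struct dev {
        struct table *table;            /* plays the role of dev->memory */
        struct reader *readers;
        int nreaders;
};

/* Reset analogue: the caller guarantees all reader threads have been
 * joined, so the per-reader pointers can be rewritten lock-free. */
static void reset_tables(struct dev *d, struct table *empty)
{
        empty->nregions = 0;            /* default empty mapping */
        d->table = empty;
        for (int i = 0; i < d->nreaders; ++i)
                d->readers[i].table = empty;
}
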
                fput(dev->log_file);
        dev->log_file = NULL;
        /* No one will access memory at this point */
-       kfree(rcu_dereference_protected(dev->memory,
-                                       locked ==
-                                               lockdep_is_held(&dev->mutex)));
-       RCU_INIT_POINTER(dev->memory, NULL);
+       kfree(dev->memory);
+       dev->memory = NULL;
        WARN_ON(!list_empty(&dev->work_list));
        if (dev->worker) {
                kthread_stop(dev->worker);
 /* Caller should have device mutex but not vq mutex */
 int vhost_log_access_ok(struct vhost_dev *dev)
 {
-       struct vhost_memory *mp;
-
-       mp = rcu_dereference_protected(dev->memory,
-                                      lockdep_is_held(&dev->mutex));
-       return memory_access_ok(dev, mp, 1);
+       return memory_access_ok(dev, dev->memory, 1);
 }
 EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
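The rcu_dereference_protected(..., lockdep_is_held(...)) calls existed
largely to document and verify the locking rule; with a plain pointer that
ceremony goes away, though the rule itself (caller holds dev->mutex) still
stands. A hypothetical variant, not part of this patch, could keep the
check explicit via lockdep:

/* Hypothetical only -- the patch does not add this assertion. */
int vhost_log_access_ok(struct vhost_dev *dev)
{
        lockdep_assert_held(&dev->mutex);       /* same rule, runtime-checked */
        return memory_access_ok(dev, dev->memory, 1);
}
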
 static int vq_log_access_ok(struct vhost_virtqueue *vq,
                            void __user *log_base)
 {
-       struct vhost_memory *mp;
        size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
-       mp = rcu_dereference_protected(vq->dev->memory,
-                                      lockdep_is_held(&vq->mutex));
-       return vq_memory_access_ok(log_base, mp,
+       return vq_memory_access_ok(log_base, vq->memory,
                                   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
                (!vq->log_used || log_access_ok(log_base, vq->log_addr,
                                        sizeof *vq->used +
                kfree(newmem);
                return -EFAULT;
        }
-       oldmem = rcu_dereference_protected(d->memory,
-                                          lockdep_is_held(&d->mutex));
-       rcu_assign_pointer(d->memory, newmem);
+       oldmem = d->memory;
+       d->memory = newmem;
 
-       /* All memory accesses are done under some VQ mutex.
-        * So below is a faster equivalent of synchronize_rcu()
-        */
+       /* All memory accesses are done under some VQ mutex. */
        for (i = 0; i < d->nvqs; ++i) {
                mutex_lock(&d->vqs[i]->mutex);
+               d->vqs[i]->memory = newmem;
                mutex_unlock(&d->vqs[i]->mutex);
        }
        kfree(oldmem);
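
This lock/unlock pass over the VQ mutexes replaces the RCU grace period: by
the time the loop finishes, every handler that might still have been using
oldmem has drained, and newmem has been published into each vq->memory. The
writer side of the userspace analogue (stdlib.h added for free()):

#include <stdlib.h>

/* Writer analogue of vhost_set_memory(): publish the new table into
 * each reader's cache under that reader's mutex.  After the loop no
 * reader can still hold the old pointer, so freeing it immediately is
 * safe -- the mutex pass acts as the grace period. */
static void publish_table(struct dev *d, struct table *newtab)
{
        struct table *old = d->table;

        d->table = newtab;
        for (int i = 0; i < d->nreaders; ++i) {
                pthread_mutex_lock(&d->readers[i].mutex);
                d->readers[i].table = newtab;
                pthread_mutex_unlock(&d->readers[i].mutex);
        }
        free(old);
}
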
 }
 EXPORT_SYMBOL_GPL(vhost_init_used);
 
-static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                          struct iovec iov[], int iov_size)
 {
        const struct vhost_memory_region *reg;
        struct vhost_memory *mem;
        struct iovec *_iov;
        u64 s = 0;
        int ret = 0;
 
-       rcu_read_lock();
-
-       mem = rcu_dereference(dev->memory);
+       mem = vq->memory;
        while ((u64)len > s) {
                u64 size;
                if (unlikely(ret >= iov_size)) {
                ++ret;
        }
 
-       rcu_read_unlock();
        return ret;
 }
 
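translate_desc() now reads vq->memory with no locking of its own; the
contract is that every caller already holds vq->mutex, which the remaining
hunks keep true for get_indirect() and vhost_get_vq_desc(). A short driver
tying the userspace sketches above into one runnable demo:

/* Demo for the illustrative sketches above (combine with their
 * definitions and includes to build). */
int main(void)
{
        struct reader r = { .mutex = PTHREAD_MUTEX_INITIALIZER };
        struct dev d = { .readers = &r, .nreaders = 1 };
        struct table *t0 = calloc(1, sizeof(*t0));
        struct table *t1 = calloc(1, sizeof(*t1));

        reset_tables(&d, t0);   /* start from an empty mapping */
        reader_work(&r);        /* reader sees t0 under its mutex */
        publish_table(&d, t1);  /* swap tables; t0 freed safely */
        reader_work(&r);        /* reader now sees t1 */
        free(t1);
        return 0;
}
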
        return next;
 }
 
-static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+static int get_indirect(struct vhost_virtqueue *vq,
                        struct iovec iov[], unsigned int iov_size,
                        unsigned int *out_num, unsigned int *in_num,
                        struct vhost_log *log, unsigned int *log_num,
                return -EINVAL;
        }
 
-       ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
+       ret = translate_desc(vq, indirect->addr, indirect->len, vq->indirect,
                             UIO_MAXIOV);
        if (unlikely(ret < 0)) {
                vq_err(vq, "Translation failure %d in indirect.\n", ret);
                        return -EINVAL;
                }
 
-               ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+               ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
                                     iov_size - iov_count);
                if (unlikely(ret < 0)) {
                        vq_err(vq, "Translation failure %d indirect idx %d\n",
  * This function returns the descriptor number found, or vq->num (which is
  * never a valid descriptor number) if none was found.  A negative code is
  * returned on error. */
-int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+int vhost_get_vq_desc(struct vhost_virtqueue *vq,
                      struct iovec iov[], unsigned int iov_size,
                      unsigned int *out_num, unsigned int *in_num,
                      struct vhost_log *log, unsigned int *log_num)
                        return -EFAULT;
                }
                if (desc.flags & VRING_DESC_F_INDIRECT) {
-                       ret = get_indirect(dev, vq, iov, iov_size,
+                       ret = get_indirect(vq, iov, iov_size,
                                           out_num, in_num,
                                           log, log_num, &desc);
                        if (unlikely(ret < 0)) {
                        continue;
                }
 
-               ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+               ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
                                     iov_size - iov_count);
                if (unlikely(ret < 0)) {
                        vq_err(vq, "Translation failure %d descriptor idx %d\n",