static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
 {
-
-       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
-                                     sizeof(val));
+       return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
+                                          sizeof(val));
 }
 
 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
 {
-
-       return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
-                                     sizeof(*val));
+       return kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, val,
+                                         sizeof(*val));
 }
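
[Not part of the patch, for review context: a minimal usage sketch of the converted accessor; example_pv_eoi_set_pending() is an invented name, not the lapic.c helper.]

/*
 * Illustration only: mark PV EOI pending by writing through the
 * per-vCPU cached mapping that was set up when the guest wrote the
 * PV EOI MSR. Error handling is reduced to a debug message.
 */
static void example_pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
        u8 val = KVM_PV_EOI_ENABLED;

        if (pv_eoi_put_user(vcpu, val) < 0)
                pr_debug("kvm: PV EOI write failed for vcpu %d\n",
                         vcpu->vcpu_id);
}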
 
 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
        if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
                return;
 
-       if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
-                                 sizeof(u32)))
+       if (kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
+                                      sizeof(u32)))
                return;
 
        apic_set_tpr(vcpu->arch.apic, data & 0xff);
                max_isr = 0;
        data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
-                               sizeof(u32));
+       kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
+                                   sizeof(u32));
 }
 
 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 {
        if (vapic_addr) {
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+               if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
                                        &vcpu->arch.apic->vapic_cache,
                                        vapic_addr, sizeof(u32)))
                        return -EINVAL;
        vcpu->arch.pv_eoi.msr_val = data;
        if (!pv_eoi_enabled(vcpu))
                return 0;
-       return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+       return kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.pv_eoi.data,
                                         addr, sizeof(u8));
 }
 
 
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct pvclock_vcpu_time_info guest_hv_clock;
 
-       if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+       if (unlikely(kvm_vcpu_read_guest_cached(v, &vcpu->pv_time,
                &guest_hv_clock, sizeof(guest_hv_clock))))
                return;
 
        BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
        vcpu->hv_clock.version = guest_hv_clock.version + 1;
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock.version));
+       kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+                                   &vcpu->hv_clock,
+                                   sizeof(vcpu->hv_clock.version));
 
        smp_wmb();
 
 
        trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
 
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock));
+       kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+                                   &vcpu->hv_clock,
+                                   sizeof(vcpu->hv_clock));
 
        smp_wmb();
 
        vcpu->hv_clock.version++;
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock.version));
+       kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+                                   &vcpu->hv_clock,
+                                   sizeof(vcpu->hv_clock.version));
 }
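
[Not part of the patch, for review context: the version writes bracketing the full hv_clock copy implement the usual pvclock seqcount-style protocol. A rough guest-side reader that this ordering pairs with might look as follows; this is an illustration, not the actual guest code.]

/*
 * Retry while the host is mid-update (odd version) or the version
 * changed while the fields were being read. The rmb()s pair with the
 * host's smp_wmb()s above.
 */
static u32 example_pvclock_read(struct pvclock_vcpu_time_info *src,
                                u64 *tsc_ts, u64 *sys_ns)
{
        u32 version;

        do {
                version = src->version;
                rmb();
                *tsc_ts = src->tsc_timestamp;
                *sys_ns = src->system_time;
                rmb();
        } while (unlikely((version & 1) || version != src->version));

        return version;
}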
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
                return 0;
        }
 
-       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+       if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.apf.data, gpa,
                                        sizeof(u32)))
                return 1;
 
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
 
-       if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       if (unlikely(kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
                return;
 
 
        vcpu->arch.st.steal.version += 1;
 
-       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
        smp_wmb();
                vcpu->arch.st.last_steal;
        vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
        smp_wmb();
 
        vcpu->arch.st.steal.version += 1;
 
-       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
                if (!(data & 1))
                        break;
 
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+               if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
                     &vcpu->arch.pv_time, data & ~1ULL,
                     sizeof(struct pvclock_vcpu_time_info)))
                        vcpu->arch.pv_time_enabled = false;
                if (data & KVM_STEAL_RESERVED_MASK)
                        return 1;
 
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+               if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.st.stime,
                                                data & KVM_STEAL_VALID_BITS,
                                                sizeof(struct kvm_steal_time)))
                        return 1;
 
        vcpu->arch.st.steal.preempted = 1;
 
-       kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
                        &vcpu->arch.st.steal.preempted,
                        offsetof(struct kvm_steal_time, preempted),
                        sizeof(vcpu->arch.st.steal.preempted));
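
[Not part of the patch, for review context: only the 'preempted' byte needs updating here, so the cached offset write avoids rewriting the rest of kvm_steal_time from the sched-out path. On the guest side that byte can then be polled cheaply; a sketch, with example_vcpu_preempted() an invented name:]

static bool example_vcpu_preempted(struct kvm_steal_time *st)
{
        /* The host only ever writes this single byte from this path. */
        return !!READ_ONCE(st->preempted);
}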
 
 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 {
-
-       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
-                                     sizeof(val));
+       return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
+                                          sizeof(val));
 }
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
-int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                          void *data, unsigned long len);
+int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                              void *data, unsigned long len);
 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                          void *data, unsigned long len);
-int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                          void *data, int offset, unsigned long len);
-int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                             gpa_t gpa, unsigned long len);
+int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                               void *data, unsigned long len);
+int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                                      void *data, int offset, unsigned long len);
+int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                                  gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
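
[Not part of the patch, for review context: a hypothetical caller of the new vCPU-based cache API; example_publish_u32() and its use are invented for illustration.]

/*
 * Initialize the cache once against a guest physical address, then do
 * cheap repeated cached writes from the owning vCPU's context.
 */
static int example_publish_u32(struct kvm_vcpu *vcpu,
                               struct gfn_to_hva_cache *ghc,
                               gpa_t gpa, u32 val)
{
        if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, ghc, gpa, sizeof(val)))
                return -EINVAL;

        return kvm_vcpu_write_guest_cached(vcpu, ghc, &val, sizeof(val));
}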
 
        return 0;
 }
 
-int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len)
 {
-       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
        return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
 }
-EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva_cache_init);
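
[Not part of the patch, for review context: the behavioural difference from the kvm_* variants is the memslot lookup. kvm_vcpu_memslots() resolves the memslots of the vCPU's current address space (e.g. SMM on x86) instead of always using address space 0; roughly, per kvm_host.h:]

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
        int as_id = kvm_arch_vcpu_memslots_id(vcpu);

        return __kvm_memslots(vcpu->kvm, as_id);
}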
 
-int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                          void *data, int offset, unsigned long len)
+int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                                      void *data, int offset, unsigned long len)
 {
-       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
        int r;
        gpa_t gpa = ghc->gpa + offset;
 
                __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
        if (unlikely(!ghc->memslot))
-               return kvm_write_guest(kvm, gpa, data, len);
+               return kvm_vcpu_write_guest(vcpu, gpa, data, len);
 
        if (kvm_is_error_hva(ghc->hva))
                return -EFAULT;
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
 
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                          void *data, unsigned long len)
+int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                              void *data, unsigned long len)
 {
-       return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
+       return kvm_vcpu_write_guest_offset_cached(vcpu, ghc, data, 0, len);
 }
-EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
 
-int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                          void *data, unsigned long len)
+int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                              void *data, unsigned long len)
 {
-       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
        int r;
 
        BUG_ON(len > ghc->len);
                __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
        if (unlikely(!ghc->memslot))
-               return kvm_read_guest(kvm, ghc->gpa, data, len);
+               return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len);
 
        if (kvm_is_error_hva(ghc->hva))
                return -EFAULT;
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_cached);
 
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {