static void record_steal_time(struct kvm_vcpu *vcpu)
 {
-       struct kvm_host_map map;
-       struct kvm_steal_time *st;
+       struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
+       struct kvm_steal_time __user *st;
+       struct kvm_memslots *slots;
+       u64 steal;
+       u32 version;
 
        if (kvm_xen_msr_enabled(vcpu->kvm)) {
                kvm_xen_runstate_set_running(vcpu);
                return;
        }
 
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
 
-       /* -EAGAIN is returned in atomic context so we can just return. */
-       if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
-                       &map, &vcpu->arch.st.cache, false))
+       if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
                return;
 
-       st = map.hva +
-               offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+       slots = kvm_memslots(vcpu->kvm);
+
+       if (unlikely(slots->generation != ghc->generation ||
+                    kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
+               gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
+
+               /* We rely on the fact that it fits in a single page. */
+               BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
+
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) ||
+                   kvm_is_error_hva(ghc->hva) || !ghc->memslot)
+                       return;
+       }
+
+       st = (struct kvm_steal_time __user *)ghc->hva;
+       if (!user_access_begin(st, sizeof(*st)))
+               return;
 
        /*
         * Doing a TLB flush here, on the guest's behalf, can avoid
         * expensive IPIs.
         */
        if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
-               u8 st_preempted = xchg(&st->preempted, 0);
+               u8 st_preempted = 0;
+               int err = -EFAULT;
+
+               /*
+                * xchgb both reads and writes st->preempted, so the memory
+                * operand must be a read-write "+m" output; "q" restricts
+                * st_preempted to a byte-addressable register.  A fault in
+                * the xchgb jumps to label 2: with err still -EFAULT.
+                */
+               asm volatile("1: xchgb %0, %2\n"
+                            "xor %1, %1\n"
+                            "2:\n"
+                            _ASM_EXTABLE_UA(1b, 2b)
+                            : "+q" (st_preempted),
+                              "+&r" (err),
+                              "+m" (st->preempted));
+               if (err)
+                       goto out;
+
+               user_access_end();
+
+               vcpu->arch.st.preempted = 0;
 
                trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
                                       st_preempted & KVM_VCPU_FLUSH_TLB);
                if (st_preempted & KVM_VCPU_FLUSH_TLB)
                        kvm_vcpu_flush_tlb_guest(vcpu);
+
+               if (!user_access_begin(st, sizeof(*st)))
+                       goto dirty;
        } else {
-               st->preempted = 0;
+               unsafe_put_user(0, &st->preempted, out);
+               vcpu->arch.st.preempted = 0;
        }
 
-       vcpu->arch.st.preempted = 0;
-
-       if (st->version & 1)
-               st->version += 1;  /* first time write, random junk */
+       unsafe_get_user(version, &st->version, out);
+       if (version & 1)
+               version += 1;  /* first time write, random junk */
 
-       st->version += 1;
+       version += 1;
+       unsafe_put_user(version, &st->version, out);
 
        smp_wmb();
 
-       st->steal += current->sched_info.run_delay -
+       unsafe_get_user(steal, &st->steal, out);
+       steal += current->sched_info.run_delay -
                vcpu->arch.st.last_steal;
        vcpu->arch.st.last_steal = current->sched_info.run_delay;
+       unsafe_put_user(steal, &st->steal, out);
 
-       smp_wmb();
-
-       st->version += 1;
+       version += 1;
+       unsafe_put_user(version, &st->version, out);
 
-       kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
+ out:
+       user_access_end();
+ dirty:
+       mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
 }
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
-       struct kvm_host_map map;
-       struct kvm_steal_time *st;
+       struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
+       struct kvm_steal_time __user *st;
+       struct kvm_memslots *slots;
+       static const u8 preempted = KVM_VCPU_PREEMPTED;
+
+       /* Record in the guest's steal_time area that this vCPU was preempted. */
 
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
        if (vcpu->arch.st.preempted)
                return;
 
-       if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
-                       &vcpu->arch.st.cache, true))
+       /* This happens on process exit */
+       if (unlikely(current->mm != vcpu->kvm->mm))
                return;
 
-       st = map.hva +
-               offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+       slots = kvm_memslots(vcpu->kvm);
+
+       /* Only use the cache while it is valid for the current memslot
+        * generation; unlike record_steal_time() we bail out instead of
+        * re-initializing it here.
+        */
+       if (unlikely(slots->generation != ghc->generation ||
+                    kvm_is_error_hva(ghc->hva) || !ghc->memslot))
+               return;
 
-       st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+       st = (struct kvm_steal_time __user *)ghc->hva;
+       BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted));
 
-       kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
+       /*
+        * NOTE(review): nofault copy — presumably this path can run where
+        * faulting/sleeping is not allowed (vCPU being scheduled out);
+        * confirm against the callers.  Only latch the preempted flag in
+        * vcpu->arch.st if the write to guest memory actually succeeded.
+        */
+       if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted)))
+               vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+
+       /* Dirty-mark the cached gfn regardless of whether the copy succeeded. */
+       mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-       struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
        int idx;
 
-       kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
        kvmclock_reset(vcpu);
 
        static_call(kvm_x86_vcpu_free)(vcpu);