                return 0;
        }
 
-       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+                                       sizeof(u32)))
                return 1;
 
        vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
 
                gpa_offset = data & ~(PAGE_MASK | 1);
 
-               /* Check that the address is 32-byte aligned. */
-               if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
-                       break;
-
                if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
-                    &vcpu->arch.pv_time, data & ~1ULL))
+                    &vcpu->arch.pv_time, data & ~1ULL,
+                    sizeof(struct pvclock_vcpu_time_info)))
                        vcpu->arch.pv_time_enabled = false;
                else
                        vcpu->arch.pv_time_enabled = true;
                if (data & KVM_STEAL_RESERVED_MASK)
                        return 1;
 
                if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-                                                       data & KVM_STEAL_VALID_BITS))
+                                               data & KVM_STEAL_VALID_BITS,
+                                               sizeof(struct kvm_steal_time)))
                        return 1;
 
                vcpu->arch.st.msr_val = data;
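
The 32-byte alignment check dropped above was what previously kept a
struct pvclock_vcpu_time_info from straddling a page boundary. Now that
kvm_gfn_to_hva_cache_init() takes the region length and validates the whole
[gpa, gpa + len) span, unaligned addresses are fine. A minimal userspace
sketch of the page-span arithmetic, assuming x86 4 KiB pages
(nr_pages_needed() here is a stand-alone copy of the logic added below, not
a kernel helper):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed: x86 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t gpa_t;
typedef uint64_t gfn_t;

/* Pages touched by [gpa, gpa + len): the same arithmetic the patched
 * kvm_gfn_to_hva_cache_init() performs. */
static gfn_t nr_pages_needed(gpa_t gpa, unsigned long len)
{
        gfn_t start_gfn = gpa >> PAGE_SHIFT;
        gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;

        return end_gfn - start_gfn + 1;
}

int main(void)
{
        /* A 32-byte-aligned pvclock area always fits in one page. */
        printf("%llu\n", (unsigned long long)nr_pages_needed(0x1000, 32));
        /* An unaligned area starting 16 bytes before a page boundary
         * spans two pages; only the patched code validates both. */
        printf("%llu\n", (unsigned long long)nr_pages_needed(PAGE_SIZE - 16, 32));
        return 0;
}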
 
 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                             gpa_t gpa);
+                             gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                             gpa_t gpa)
+                             gpa_t gpa, unsigned long len)
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        int offset = offset_in_page(gpa);
-       gfn_t gfn = gpa >> PAGE_SHIFT;
+       gfn_t start_gfn = gpa >> PAGE_SHIFT;
+       gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+       gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+       gfn_t nr_pages_avail;
 
        ghc->gpa = gpa;
        ghc->generation = slots->generation;
-       ghc->memslot = gfn_to_memslot(kvm, gfn);
-       ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
-       if (!kvm_is_error_hva(ghc->hva))
+       ghc->len = len;
+       ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+       ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+       if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
                ghc->hva += offset;
-       else
-               return -EFAULT;
-
+       } else {
+               /*
+                * If the requested region crosses two memslots, we still
+                * verify that the entire region is valid here.
+                */
+               while (start_gfn <= end_gfn) {
+                       ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+                       ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+                                                  &nr_pages_avail);
+                       if (kvm_is_error_hva(ghc->hva))
+                               return -EFAULT;
+                       start_gfn += nr_pages_avail;
+               }
+               /* Use the slow path for cross page reads and writes. */
+               ghc->memslot = NULL;
+       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
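
The fast-path/slow-path split above is easy to model in userspace. The
following is an illustrative sketch only: slot_pages_avail() stands in for
gfn_to_hva_many(), and the two-slot guest layout is invented.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Invented memslot layout: two adjacent 16-page guest-memory slots. */
struct slot { gfn_t base; gfn_t npages; };
static const struct slot slots[] = { { 0, 16 }, { 16, 16 } };

/* Stand-in for gfn_to_hva_many(): pages available from gfn to the end
 * of its slot, or 0 if gfn is unmapped (models an error hva). */
static gfn_t slot_pages_avail(gfn_t gfn)
{
        for (unsigned i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
                if (gfn >= slots[i].base &&
                    gfn < slots[i].base + slots[i].npages)
                        return slots[i].base + slots[i].npages - gfn;
        return 0;
}

/* Mirrors the patched init: 1 = fast path (one slot covers the region),
 * 0 = slow path (valid but crosses slots), -1 = invalid region. */
static int cache_init(gfn_t start_gfn, gfn_t nr_pages_needed)
{
        gfn_t end_gfn = start_gfn + nr_pages_needed - 1;
        gfn_t avail = slot_pages_avail(start_gfn);

        if (avail >= nr_pages_needed)
                return 1;
        /* Region crosses slots: still verify every page is backed. */
        while (start_gfn <= end_gfn) {
                avail = slot_pages_avail(start_gfn);
                if (!avail)
                        return -1;
                start_gfn += avail;
        }
        return 0;       /* models ghc->memslot = NULL */
}

int main(void)
{
        printf("%d\n", cache_init(2, 4));       /* 1: inside slot 0 */
        printf("%d\n", cache_init(14, 4));      /* 0: spans slots 0 and 1 */
        printf("%d\n", cache_init(30, 4));      /* -1: runs past slot 1 */
        return 0;
}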

 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                            void *data, unsigned long len)
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        int r;
 
+       BUG_ON(len > ghc->len);
+
        if (slots->generation != ghc->generation)
-               kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+               kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+       if (unlikely(!ghc->memslot))
+               return kvm_write_guest(kvm, ghc->gpa, data, len);
 
        if (kvm_is_error_hva(ghc->hva))
                return -EFAULT;

 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len)
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        int r;
 
+       BUG_ON(len > ghc->len);
+
        if (slots->generation != ghc->generation)
-               kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+               kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+       if (unlikely(!ghc->memslot))
+               return kvm_read_guest(kvm, ghc->gpa, data, len);
 
        if (kvm_is_error_hva(ghc->hva))
                return -EFAULT;
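
Taken together, the cached paths now enforce a simple contract: a cached
access may never exceed the length the cache was initialized with, and a
cache left with memslot == NULL always falls back to the uncached
kvm_read_guest()/kvm_write_guest() helpers. A compact userspace model of
that contract (a sketch, not kernel code):

#include <assert.h>
#include <stdio.h>

struct model_cache {
        unsigned long len;      /* bytes validated at init time */
        int memslot_valid;      /* 0 models ghc->memslot == NULL */
};

static void model_write_cached(const struct model_cache *c, unsigned long len)
{
        assert(len <= c->len);  /* models BUG_ON(len > ghc->len) */
        if (!c->memslot_valid) {
                printf("slow path: kvm_write_guest() for %lu bytes\n", len);
                return;
        }
        printf("fast path: direct copy of %lu bytes\n", len);
}

int main(void)
{
        struct model_cache one_slot = { .len = 64, .memslot_valid = 1 };
        struct model_cache crossing = { .len = 64, .memslot_valid = 0 };

        model_write_cached(&one_slot, 4);       /* fast path */
        model_write_cached(&crossing, 4);       /* slow path */
        return 0;
}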