diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
        struct intel_vgpu *vgpu = spt->vgpu;
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *s;
        struct intel_gvt_gtt_entry se, ge;
-       unsigned long i;
+       unsigned long gfn, i;
        int ret;
 
        trace_spt_change(spt->vgpu->id, "born", spt,
                         spt->guest_page.gfn, spt->shadow_page.type);
 
        if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
                for_each_present_guest_entry(spt, &ge, i) {
-                       ret = gtt_entry_p2m(vgpu, &ge, &se);
-                       if (ret)
-                               goto fail;
+                       gfn = ops->get_pfn(&ge);
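+                       /* fall back to the scratch page if the gfn is
+                        * invalid (e.g. only partially written by the
+                        * guest) or cannot be translated
+                        */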
+                       if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
+                               gtt_entry_p2m(vgpu, &ge, &se))
+                               ops->set_pfn(&se, gvt->gtt.scratch_mfn);
                        ppgtt_set_shadow_entry(spt, &se, i);
                }
                return 0;
        }

@@ ... @@
 static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        void *p_data, unsigned int bytes)
 {
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
        unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
-       unsigned long gma;
+       unsigned long gma, gfn;
        struct intel_gvt_gtt_entry e, m;
        int ret;
 
@@ ... @@
        ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
        memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                        bytes);
 
        if (ops->test_present(&e)) {
+               gfn = ops->get_pfn(&e);
+
+               /* one PTE update may be issued in multiple writes and the
+                * first write may not construct a valid gfn
+                */
+               if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+                       ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+                       goto out;
+               }
+
                ret = gtt_entry_p2m(vgpu, &e, &m);
                if (ret) {
                        gvt_vgpu_err("fail to translate guest gtt entry\n");
                        ops->set_pfn(&m, gvt->gtt.scratch_mfn);
                }
        } else {
                m = e;
                ops->set_pfn(&m, gvt->gtt.scratch_mfn);
        }
 
+out:
        ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
        gtt_invalidate(gvt->dev_priv);
        ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
 
        return 0;
 }

diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
 static unsigned long kvmgt_virt_to_pfn(void *addr)
 {
        return PFN_DOWN(__pa(addr));
 }
 
+static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+{
+       struct kvmgt_guest_info *info;
+       struct kvm *kvm;
+
+       if (!handle_valid(handle))
+               return false;
+
+       info = (struct kvmgt_guest_info *)handle;
+       kvm = info->kvm;
+
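+       /* a gfn is visible only if it is backed by a KVM user memslot */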
+       return kvm_is_visible_gfn(kvm, gfn);
+}
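
For reference (not part of this patch): in kernels of this vintage,
kvm_is_visible_gfn() in virt/kvm/kvm_main.c rejects a gfn that does not fall
inside a live user memslot, roughly:

    bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
    {
            struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

            /* no slot, an internal slot, or a slot being deleted/moved
             * means the gfn is not usable guest memory
             */
            if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
                memslot->flags & KVM_MEMSLOT_INVALID)
                    return false;

            return true;
    }
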
+
 struct intel_gvt_mpt kvmgt_mpt = {
        .host_init = kvmgt_host_init,
        .host_exit = kvmgt_host_exit,
        .set_opregion = kvmgt_set_opregion,
        .get_vfio_device = kvmgt_get_vfio_device,
        .put_vfio_device = kvmgt_put_vfio_device,
+       .is_valid_gfn = kvmgt_is_valid_gfn,
 };
 EXPORT_SYMBOL_GPL(kvmgt_mpt);
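
For the .is_valid_gfn initializer above to build, struct intel_gvt_mpt in
hypercall.h needs the matching hook. That hunk is omitted from this excerpt;
based on the signature of kvmgt_is_valid_gfn and the mpt.h caller below, it is:

diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
        int (*set_opregion)(void *vgpu);
        int (*get_vfio_device)(void *vgpu);
        void (*put_vfio_device)(void *vgpu);
+       bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };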
 
 
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
        intel_gvt_host.mpt->put_vfio_device(vgpu);
 }
 
+/**
+ * intel_gvt_hypervisor_is_valid_gfn - check whether a gfn is valid (visible
+ * to the guest)
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * true if the gfn is valid, false otherwise.
+ */
+static inline bool intel_gvt_hypervisor_is_valid_gfn(
+               struct intel_vgpu *vgpu, unsigned long gfn)
+{
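+       /* the hook is optional; MPT back ends that do not implement
+        * is_valid_gfn skip validation and treat every gfn as valid
+        */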
+       if (!intel_gvt_host.mpt->is_valid_gfn)
+               return true;
+
+       return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
+}
+
 #endif /* _GVT_MPT_H_ */