#endif
 };
 
+/* Check whether an error code indicates the guest VM is unhealthy */
+#define vgpu_is_vm_unhealthy(ret_val) \
+       (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
+
 struct intel_gvt_gm {
        unsigned long vgpu_allocated_low_gm_size;
        unsigned long vgpu_allocated_high_gm_size;
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
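+/* Put a vGPU into failsafe mode after a fatal guest error */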
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
 
 struct intel_gvt_ops {
        int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
 enum {
        GVT_FAILSAFE_UNSUPPORTED_GUEST,
        GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
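+       /* an unrecoverable error was hit while handling a guest workload */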
+       GVT_FAILSAFE_GUEST_ERR,
 };
 
 static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
 
        (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
 
 
-static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
 {
        switch (reason) {
        case GVT_FAILSAFE_UNSUPPORTED_GUEST:
                break;
        case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
                pr_err("Graphics resource is not enough for the guest\n");
+               break;
+       case GVT_FAILSAFE_GUEST_ERR:
+               pr_err("GVT internal error for the guest\n");
+               break;
        default:
                break;
        }
 
                                        FORCEWAKE_ALL);
 
                intel_runtime_pm_put(gvt->dev_priv);
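+               /*
+                * These error codes mean the guest submitted a workload we
+                * cannot safely execute: drop its pending execlists and put
+                * the vGPU into failsafe mode.
+                */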
+               if (vgpu_is_vm_unhealthy(ret)) {
+                       mutex_lock(&gvt->lock);
+                       intel_vgpu_clean_execlist(vgpu);
+                       mutex_unlock(&gvt->lock);
+                       enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+               }
        }
        return 0;
 }