need_full_reset = job_signaled = false;
        INIT_LIST_HEAD(&device_list);
 
+       amdgpu_ras_set_error_query_ready(adev, false);
+
        dev_info(adev->dev, "GPU %s begin!\n",
                (in_ras_intr && !use_baco) ? "jobs stop":"reset");
 
        /* block all schedulers and reset given job's ring */
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
                if (tmp_adev != adev) {
+                       amdgpu_ras_set_error_query_ready(tmp_adev, false);
                        amdgpu_device_lock_adev(tmp_adev, false);
                        if (!amdgpu_sriov_vf(tmp_adev))
                                        amdgpu_amdkfd_pre_reset(tmp_adev);
 
 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr);
 
+/* Mark whether RAS error-injection/query interfaces may be used.
+ * Cleared at the start of GPU reset and re-armed once recovery (or RAS fs
+ * init) completes, so debugfs/sysfs queries cannot race a reset in flight.
+ * Tolerates both a NULL device and a NULL RAS context (RAS-less ASICs never
+ * allocate one), in which case the call is a no-op.
+ */
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
+{
+       struct amdgpu_ras *con = adev ? amdgpu_ras_get_context(adev) : NULL;
+
+       if (con)
+               con->error_query_ready = ready;
+}
+
+/* Report whether RAS error-injection/query interfaces are currently usable.
+ * Returns false when the device is NULL or has no RAS context (RAS-less
+ * ASICs), instead of dereferencing a NULL context.
+ */
+bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
+{
+       struct amdgpu_ras *con = adev ? amdgpu_ras_get_context(adev) : NULL;
+
+       if (con)
+               return con->error_query_ready;
+
+       return false;
+}
+
 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
 {
        struct ras_debug_if data;
        int ret = 0;
 
-       if (amdgpu_ras_intr_triggered()) {
+       if (!amdgpu_ras_get_error_query_ready(adev)) {
                DRM_WARN("RAS WARN: error injection currently inaccessible\n");
                return size;
        }
                .head = obj->head,
        };
 
-       if (amdgpu_ras_intr_triggered())
+       if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return snprintf(buf, PAGE_SIZE,
                                "Query currently inaccessible\n");
 
        }
 
        /* in resume phase, no need to create ras fs node */
-       if (adev->in_suspend || adev->in_gpu_reset)
+       if (adev->in_suspend || adev->in_gpu_reset) {
+               amdgpu_ras_set_error_query_ready(adev, true);
                return 0;
+       }
 
        if (ih_info->cb) {
                r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
        if (r)
                goto sysfs;
 
+       amdgpu_ras_set_error_query_ready(adev, true);
+
        return 0;
 cleanup:
        amdgpu_ras_sysfs_remove(adev, ras_block);