#define AMDGPU_RESET_VCE                       (1 << 13)
 #define AMDGPU_RESET_VCE1                      (1 << 14)
 
+/* reset mask */
+#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
+#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */
+#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */
+#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */
+
 /* max cursor sizes (in pixels) */
 #define CIK_CURSOR_WIDTH 128
 #define CIK_CURSOR_HEIGHT 128
 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
                                            struct dma_fence *gang);
 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
+ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
+ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
 
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
 
        }
        return ret;
 }
+
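+/**
+ * amdgpu_get_soft_full_reset_mask - query generic reset support for a ring
+ * @ring: the ring to query
+ *
+ * Returns a mask of AMDGPU_RESET_TYPE_* flags indicating whether a full
+ * adapter reset and/or an IP soft reset can currently be used to recover
+ * @ring.
+ */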
+ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
+{
+       ssize_t size = 0;
+
+       if (!ring || !ring->adev)
+               return size;
+
+       if (amdgpu_device_should_recover_gpu(ring->adev))
+               size |= AMDGPU_RESET_TYPE_FULL;
+
+       if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
+           !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
+               size |= AMDGPU_RESET_TYPE_SOFT_RESET;
+
+       return size;
+}
+
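+/**
+ * amdgpu_show_reset_mask - format a supported-reset mask for sysfs
+ * @buf: sysfs output buffer
+ * @supported_reset: mask of AMDGPU_RESET_TYPE_* flags
+ *
+ * Emits a space-separated, newline-terminated list of the supported reset
+ * methods, e.g. "soft queue full", or "unsupported" when the mask is empty.
+ */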
+ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
+{
+       ssize_t size = 0;
+
+       if (supported_reset == 0) {
+               size += sysfs_emit_at(buf, size, "unsupported");
+               size += sysfs_emit_at(buf, size, "\n");
+               return size;
+       }
+
+       if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
+               size += sysfs_emit_at(buf, size, "soft ");
+
+       if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
+               size += sysfs_emit_at(buf, size, "queue ");
+
+       if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
+               size += sysfs_emit_at(buf, size, "pipe ");
+
+       if (supported_reset & AMDGPU_RESET_TYPE_FULL)
+               size += sysfs_emit_at(buf, size, "full ");
+
+       size += sysfs_emit_at(buf, size, "\n");
+       return size;
+}
 
        return count;
 }
 
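+/*
+ * sysfs show callbacks reporting which reset methods are supported by the
+ * gfx and compute rings.
+ */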
+static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
+                                               struct device_attribute *attr,
+                                               char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
+
+       if (!adev)
+               return -ENODEV;
+
+       return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
+}
+
+static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
+                                               struct device_attribute *attr,
+                                               char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
+
+       if (!adev)
+               return -ENODEV;
+
+       return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
+}
+
 static DEVICE_ATTR(run_cleaner_shader, 0200,
                   NULL, amdgpu_gfx_set_run_cleaner_shader);
 
 
 static DEVICE_ATTR(available_compute_partition, 0444,
                   amdgpu_gfx_get_available_compute_partition, NULL);
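+
+/*
+ * Read-only attributes exposing the supported reset methods as a string,
+ * e.g. reading gfx_reset_mask may return "soft queue full".
+ */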
+static DEVICE_ATTR(gfx_reset_mask, 0444,
+                  amdgpu_gfx_get_gfx_reset_mask, NULL);
+static DEVICE_ATTR(compute_reset_mask, 0444,
+                  amdgpu_gfx_get_compute_reset_mask, NULL);
 
 static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
 {
                device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
 }
 
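+/*
+ * Create the gfx_reset_mask and compute_reset_mask sysfs files; they are
+ * only exposed when GPU recovery is enabled (amdgpu_gpu_recovery).
+ */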
+static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+       int r = 0;
+
+       if (!amdgpu_gpu_recovery)
+               return r;
+
+       if (adev->gfx.num_gfx_rings) {
+               r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
+               if (r)
+                       return r;
+       }
+
+       if (adev->gfx.num_compute_rings) {
+               r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
+               if (r)
+                       return r;
+       }
+
+       return r;
+}
+
+static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+       if (!amdgpu_gpu_recovery)
+               return;
+
+       if (adev->gfx.num_gfx_rings)
+               device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
+
+       if (adev->gfx.num_compute_rings)
+               device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
+}
+
 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
 {
        int r;
        if (r)
                dev_err(adev->dev, "failed to create isolation sysfs files");
 
+       r = amdgpu_gfx_sysfs_reset_mask_init(adev);
+       if (r)
+               dev_err(adev->dev, "failed to create reset mask sysfs files");
+
        return r;
 }
 
 {
        amdgpu_gfx_sysfs_xcp_fini(adev);
        amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+       amdgpu_gfx_sysfs_reset_mask_fini(adev);
 }
 
 int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
 
        /* reset mask */
        uint32_t                        grbm_soft_reset;
        uint32_t                        srbm_soft_reset;
+       uint32_t                        gfx_supported_reset;
+       uint32_t                        compute_supported_reset;
 
        /* gfx off */
        bool                            gfx_off_state;      /* true: enabled, false: disabled */
 
                        }
                }
        }
+       /* TODO: Add queue reset mask when FW fully supports it */
+       adev->gfx.gfx_supported_reset =
+               amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+       adev->gfx.compute_supported_reset =
+               amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
 
        r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
        if (r) {
 
                }
        }
 
+       adev->gfx.gfx_supported_reset =
+               amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+       adev->gfx.compute_supported_reset =
+               amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
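+
+       /* per-queue reset is only reported once the ME/MEC firmware supports it */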
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+       case IP_VERSION(11, 0, 0):
+       case IP_VERSION(11, 0, 2):
+       case IP_VERSION(11, 0, 3):
+               if ((adev->gfx.me_fw_version >= 2280) &&
+                   (adev->gfx.mec_fw_version >= 2410)) {
+                       adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+                       adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+               }
+               break;
+       default:
+               break;
+       }
+
        if (!adev->enable_mes_kiq) {
                r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
                if (r) {
 
                }
        }
 
+       /* TODO: Add queue reset mask when FW fully supports it */
+       adev->gfx.gfx_supported_reset =
+               amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+       adev->gfx.compute_supported_reset =
+               amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
        if (!adev->enable_mes_kiq) {
                r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
                if (r) {
 
                }
        }
 
+       /* TODO: Add queue reset mask when FW fully supports it */
+       adev->gfx.gfx_supported_reset =
+               amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+       adev->gfx.compute_supported_reset =
+               amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
        r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
        if (r) {
                DRM_ERROR("Failed to init KIQ BOs!\n");
 
                        return r;
        }
 
+       adev->gfx.compute_supported_reset =
+               amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
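+
+       /* per-queue/per-pipe compute reset requires MEC firmware that supports it */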
+       switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+       case IP_VERSION(9, 4, 3):
+       case IP_VERSION(9, 4, 4):
+               if (adev->gfx.mec_fw_version >= 155) {
+                       adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+                       adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
+               }
+               break;
+       default:
+               break;
+       }
+
        r = gfx_v9_4_3_gpu_early_init(adev);
        if (r)
                return r;