drm/amdgpu: Introduce VF critical region check for RAS poison injection
author    Xiang Liu <xiang.liu@amd.com>
          Tue, 19 Aug 2025 04:51:28 +0000 (12:51 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
          Thu, 18 Sep 2025 13:43:02 +0000 (09:43 -0400)
The SRIOV guest sends a request to the host via mailbox to check whether
the poison injection address is in the VF critical region or not.

Signed-off-by: Xiang Liu <xiang.liu@amd.com>
Reviewed-by: Shravan Kumar Gande <Shravankumar.Gande@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
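
For illustration, a minimal sketch of how a guest-side caller might consult the new interface before injecting a poison error. The wrapper below is hypothetical and not part of this patch; only amdgpu_virt_check_vf_critical_region() and its return conventions come from the changes that follow.

/* Hypothetical caller (not part of this patch): skip the injection when the
 * host reports that the target address lies inside the VF critical region.
 */
static int example_poison_inject(struct amdgpu_device *adev, u64 addr)
{
	bool hit = false;
	int r;

	r = amdgpu_virt_check_vf_critical_region(adev, addr, &hit);
	if (r == -EOPNOTSUPP)
		return r;	/* virt ops do not implement the check */
	if (r)
		return r;	/* e.g. the request was rate-limited */

	if (hit) {
		dev_warn(adev->dev,
			 "poison addr 0x%llx is in the VF critical region, skipping injection\n",
			 addr);
		return -EPERM;
	}

	/* ... proceed with the actual poison injection ... */
	return 0;
}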

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 13f0cdeb59c46c429f675c5417b5076706bb01d7..3328ab63376bb18a8c54b9034165d8b39a663ce3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -828,11 +828,14 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
 {
        ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
        ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+       ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
 
        ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
                            RATELIMIT_MSG_ON_RELEASE);
        ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
                            RATELIMIT_MSG_ON_RELEASE);
+       ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
+                           RATELIMIT_MSG_ON_RELEASE);
 
        mutex_init(&adev->virt.ras.ras_telemetry_mutex);
 
@@ -1501,3 +1504,55 @@ void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
        if (virt->ops && virt->ops->req_bad_pages)
                virt->ops->req_bad_pages(adev);
 }
+
+static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
+                                          struct amdsriov_ras_telemetry *host_telemetry,
+                                          bool *hit)
+{
+       struct amd_sriov_ras_chk_criti *tmp = NULL;
+       uint32_t checksum, used_size;
+
+       checksum = host_telemetry->header.checksum;
+       used_size = host_telemetry->header.used_size;
+
+       if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+               return 0;
+
+       tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
+       if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+               goto out;
+
+       if (hit)
+               *hit = tmp->hit ? true : false;
+
+out:
+       kfree(tmp);
+
+       return 0;
+}
+
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
+{
+       struct amdgpu_virt *virt = &adev->virt;
+       int r = -EPERM;
+
+       if (!virt->ops || !virt->ops->req_ras_chk_criti)
+               return -EOPNOTSUPP;
+
+       /* Host allows 15 ras telemetry requests per 60 seconds. After which, the Host
+        * will ignore incoming guest messages. Ratelimit the guest messages to
+        * prevent guest self DOS.
+        */
+       if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
+               mutex_lock(&virt->ras.ras_telemetry_mutex);
+               if (!virt->ops->req_ras_chk_criti(adev, addr))
+                       r = amdgpu_virt_cache_chk_criti_hit(
+                               adev, virt->fw_reserve.ras_telemetry, hit);
+               mutex_unlock(&virt->ras.ras_telemetry_mutex);
+       }
+
+       return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 3afbdf7b487a1aac2e33a7b4151702636988a6e7..d1172c8e58c477e976472c1bd8c8c1d7bee3dea1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -98,6 +98,7 @@ struct amdgpu_virt_ops {
        int (*req_ras_err_count)(struct amdgpu_device *adev);
        int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
        int (*req_bad_pages)(struct amdgpu_device *adev);
+       int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
 };
 
 /*
@@ -252,6 +253,7 @@ struct amdgpu_virt_ras_err_handler_data {
 struct amdgpu_virt_ras {
        struct ratelimit_state ras_error_cnt_rs;
        struct ratelimit_state ras_cper_dump_rs;
+       struct ratelimit_state ras_chk_criti_rs;
        struct mutex ras_telemetry_mutex;
        uint64_t cper_rptr;
 };
@@ -453,4 +455,5 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
 bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
                                        enum amdgpu_ras_block block);
 void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 33edad1f9dcd6d292663e3717936a44de8871c72..3a79ed7d8031e6ea578f12ad9307aadcfb1a96f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -405,12 +405,17 @@ struct amd_sriov_ras_cper_dump {
        uint32_t buf[];
 };
 
+struct amd_sriov_ras_chk_criti {
+       uint32_t hit;
+};
+
 struct amdsriov_ras_telemetry {
        struct amd_sriov_ras_telemetry_header header;
 
        union {
                struct amd_sriov_ras_telemetry_error_count error_count;
                struct amd_sriov_ras_cper_dump cper_dump;
+               struct amd_sriov_ras_chk_criti chk_criti;
        } body;
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 457972aa56324b7ba89422f0a5494b74b6a45d5e..e5282a5d05d95721e046be09ed245cee93716e68 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -202,6 +202,9 @@ send_request:
        case IDH_REQ_RAS_CPER_DUMP:
                event = IDH_RAS_CPER_DUMP_READY;
                break;
+       case IDH_REQ_RAS_CHK_CRITI:
+               event = IDH_REQ_RAS_CHK_CRITI_READY;
+               break;
        default:
                break;
        }
@@ -556,6 +559,16 @@ static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
        return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
 }
 
+static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
+{
+       uint32_t addr_hi, addr_lo;
+
+       addr_hi = (uint32_t)(addr >> 32);
+       addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
+       return xgpu_nv_send_access_requests_with_param(
+               adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
+}
+
 const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_full_gpu   = xgpu_nv_request_full_gpu_access,
        .rel_full_gpu   = xgpu_nv_release_full_gpu_access,
@@ -569,4 +582,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_ras_err_count = xgpu_nv_req_ras_err_count,
        .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
        .req_bad_pages = xgpu_nv_req_ras_bad_pages,
+       .req_ras_chk_criti = xgpu_nv_check_vf_critical_region
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 5808689562ccb9f14a53b64cbd9b3aa43e3b7450..c1083e5e41e0299991444c660efaf430c153cb61 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -43,6 +43,7 @@ enum idh_request {
        IDH_REQ_RAS_ERROR_COUNT = 203,
        IDH_REQ_RAS_CPER_DUMP = 204,
        IDH_REQ_RAS_BAD_PAGES = 205,
+       IDH_REQ_RAS_CHK_CRITI = 206
 };
 
 enum idh_event {
@@ -62,6 +63,7 @@ enum idh_event {
        IDH_RAS_BAD_PAGES_READY = 15,
        IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
        IDH_UNRECOV_ERR_NOTIFICATION = 17,
+       IDH_REQ_RAS_CHK_CRITI_READY = 18,
 
        IDH_TEXT_MESSAGE = 255,
 };