drm/amdgpu: refactor bad_page_work for corner case handling
author Chenglei Xie <Chenglei.Xie@amd.com>
Thu, 7 Aug 2025 20:52:34 +0000 (16:52 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 15 Aug 2025 17:07:30 +0000 (13:07 -0400)
When a poison is consumed on the guest before the guest receives the host's poison creation message, a corner case can occur in which the poison_handler completes its processing earlier than it should. The guest then hangs waiting for the req_bad_pages reply during a VF FLR, leaving the VM inaccessible in stress tests.

To fix this issue, this patch refactors the mailbox sequence by separating bad_page_work into two parts: req_bad_pages_work and handle_bad_pages_work.
Old sequence:
  1. Stop data exchange work
  2. Guest sends MB_REQ_RAS_BAD_PAGES to host and keeps polling for IDH_RAS_BAD_PAGES_READY
  3. If IDH_RAS_BAD_PAGES_READY arrives within the timeout limit, re-init the data exchange region for the updated bad page info;
     otherwise time out with an error message
New sequence:
req_bad_pages_work:
  1. Stop data exchange work
  2. Guest sends MB_REQ_RAS_BAD_PAGES to host
Once the guest receives the IDH_RAS_BAD_PAGES_READY event,
handle_bad_pages_work:
  3. Re-init the data exchange region for the updated bad page info (see the condensed sketch below)
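A condensed sketch of the new receive-side dispatch, abridged from the diff
below (the mailbox ack and the amdgpu_sriov_runtime() check are omitted here):

    /* in the mailbox rcv irq handler */
    switch (event) {
    case IDH_RAS_BAD_PAGES_NOTIFICATION:
            /* host announced new bad pages: run steps 1-2 */
            schedule_work(&adev->virt.req_bad_pages_work);
            break;
    case IDH_RAS_BAD_PAGES_READY:
            /* host finished preparing the bad page info: run step 3 */
            schedule_work(&adev->virt.handle_bad_pages_work);
            break;
    }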

Signed-off-by: Chenglei Xie <Chenglei.Xie@amd.com>
Reviewed-by: Shravan Kumar Gande <Shravankumar.Gande@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/soc15.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 3da3ebb1d9a134132643e1b82a9595c3232e1222..58accf2259b38fefb61601f0f11fd5eb433f05f0 100644
@@ -267,7 +267,8 @@ struct amdgpu_virt {
        struct amdgpu_irq_src           rcv_irq;
 
        struct work_struct              flr_work;
-       struct work_struct              bad_pages_work;
+       struct work_struct              req_bad_pages_work;
+       struct work_struct              handle_bad_pages_work;
 
        struct amdgpu_mm_table          mm_table;
        const struct amdgpu_virt_ops    *ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 48101a34e049f450c8d7bee81c5e5c772c8502c7..9a40107a0869d1e80c8d38d087a1a29317797eed 100644
@@ -292,14 +292,32 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
        }
 }
 
-static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct *work)
 {
-       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
 
        if (down_read_trylock(&adev->reset_domain->sem)) {
                amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_request_bad_pages(adev);
+               up_read(&adev->reset_domain->sem);
+       }
+}
+
+/**
+ * xgpu_ai_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+       struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+       if (down_read_trylock(&adev->reset_domain->sem)) {
+               amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_init_data_exchange(adev);
                up_read(&adev->reset_domain->sem);
        }
@@ -327,10 +345,15 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
        switch (event) {
+       case IDH_RAS_BAD_PAGES_READY:
+               xgpu_ai_mailbox_send_ack(adev);
+               if (amdgpu_sriov_runtime(adev))
+                       schedule_work(&adev->virt.handle_bad_pages_work);
+               break;
        case IDH_RAS_BAD_PAGES_NOTIFICATION:
                xgpu_ai_mailbox_send_ack(adev);
                if (amdgpu_sriov_runtime(adev))
-                       schedule_work(&adev->virt.bad_pages_work);
+                       schedule_work(&adev->virt.req_bad_pages_work);
                break;
        case IDH_UNRECOV_ERR_NOTIFICATION:
                xgpu_ai_mailbox_send_ack(adev);
@@ -415,7 +438,8 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
        }
 
        INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
-       INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
+       INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_ai_mailbox_req_bad_pages_work);
+       INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_ai_mailbox_handle_bad_pages_work);
 
        return 0;
 }
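Side note on the pattern above: the mailbox interrupt handler only acks the
event and schedules a work item; the data-exchange teardown/re-init runs later
in process context through the standard Linux workqueue API. A minimal,
hypothetical sketch of that deferral pattern (names invented for illustration,
not taken from this patch):

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    static struct work_struct my_work;

    /* runs in process context, so it may sleep and take semaphores */
    static void my_work_fn(struct work_struct *work)
    {
            /* heavy lifting goes here */
    }

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            schedule_work(&my_work);  /* cheap: just queue the work */
            return IRQ_HANDLED;
    }

    /* called from module/driver init */
    static int my_driver_init(void)
    {
            INIT_WORK(&my_work, my_work_fn);
            /* ... request_irq(irq, my_irq_handler, 0, "my-dev", NULL); ... */
            return 0;
    }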
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index f6d8597452ed0123d573e1351538bc356a733ef8..457972aa56324b7ba89422f0a5494b74b6a45d5e 100644
@@ -202,9 +202,6 @@ send_request:
        case IDH_REQ_RAS_CPER_DUMP:
                event = IDH_RAS_CPER_DUMP_READY;
                break;
-       case IDH_REQ_RAS_BAD_PAGES:
-               event = IDH_RAS_BAD_PAGES_READY;
-               break;
        default:
                break;
        }
@@ -359,14 +356,32 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
        }
 }
 
-static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
 {
-       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
 
        if (down_read_trylock(&adev->reset_domain->sem)) {
                amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_request_bad_pages(adev);
+               up_read(&adev->reset_domain->sem);
+       }
+}
+
+/**
+ * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+       struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+       if (down_read_trylock(&adev->reset_domain->sem)) {
+               amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_init_data_exchange(adev);
                up_read(&adev->reset_domain->sem);
        }
@@ -397,10 +412,15 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
        switch (event) {
+       case IDH_RAS_BAD_PAGES_READY:
+               xgpu_nv_mailbox_send_ack(adev);
+               if (amdgpu_sriov_runtime(adev))
+                       schedule_work(&adev->virt.handle_bad_pages_work);
+               break;
        case IDH_RAS_BAD_PAGES_NOTIFICATION:
                xgpu_nv_mailbox_send_ack(adev);
                if (amdgpu_sriov_runtime(adev))
-                       schedule_work(&adev->virt.bad_pages_work);
+                       schedule_work(&adev->virt.req_bad_pages_work);
                break;
        case IDH_UNRECOV_ERR_NOTIFICATION:
                xgpu_nv_mailbox_send_ack(adev);
@@ -485,7 +505,8 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
        }
 
        INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
-       INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
+       INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
+       INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 9e74c9822e622a054751e138a469d984b1b96c13..9785fada4fa799c9a8b7ea34c3e5823ebdc386ef 100644
@@ -741,7 +741,6 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
 void soc15_set_virt_ops(struct amdgpu_device *adev)
 {
        adev->virt.ops = &xgpu_ai_virt_ops;
-
        /* init soc15 reg base early enough so we can
         * request request full access for sriov before
         * set_ip_blocks. */