]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
drm/amdgpu: Serialize non TDR gpu recovery with TDRs
authorAndrey Grodzovsky <andrey.grodzovsky@amd.com>
Fri, 17 Dec 2021 18:05:15 +0000 (13:05 -0500)
committerAndrey Grodzovsky <andrey.grodzovsky@amd.com>
Wed, 9 Feb 2022 17:15:23 +0000 (12:15 -0500)
Use reset domain wq also for non TDR gpu recovery triggers
such as sysfs and RAS. We must serialize all possible
GPU recoveries to guarantee no concurrency there.
For TDR call the original recovery function directly since
it's already executed from within the wq. For others just
use a wrapper to queue work and wait on it to finish.

v2: Rename to amdgpu_recover_work_struct

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://www.spinics.net/lists/amd-gfx/msg74113.html
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

index b76c1cfad7f1b794eae53f1c618eaeb5a634f0da..540a38fe5cd607401d3bd03349e12707e75d3296 100644 (file)
@@ -1298,6 +1298,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job* job);
+int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+                             struct amdgpu_job *job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
index 00123b0013d3f07278c5fb688ea9ed9869d5951a..15e8fde3ac2d022652855cc30ac547c5c44ffc1a 100644 (file)
@@ -5033,7 +5033,7 @@ retry:
  * Returns 0 for success or an error on failure.
  */
 
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
                              struct amdgpu_job *job)
 {
        struct list_head device_list, *device_list_handle =  NULL;
@@ -5292,6 +5292,37 @@ skip_recovery:
        return r;
 }
 
+struct amdgpu_recover_work_struct {
+       struct work_struct base;
+       struct amdgpu_device *adev;
+       struct amdgpu_job *job;
+       int ret;
+};
+
+static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
+{
+       struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base);
+
+       recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
+}
+/*
+ * Serialize gpu recover into reset domain single threaded wq
+ */
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+                                   struct amdgpu_job *job)
+{
+       struct amdgpu_recover_work_struct work = {.adev = adev, .job = job};
+
+       INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
+
+       if (!queue_work(adev->reset_domain.wq, &work.base))
+               return -EAGAIN;
+
+       flush_work(&work.base);
+
+       return work.ret;
+}
+
 /**
  * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
  *
index bfc47bea23db2972e358dd449f683106d6255cde..38c9fd7b7ad4e0571484c92ddd5051bf7e7684ae 100644 (file)
@@ -63,7 +63,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
                  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
        if (amdgpu_device_should_recover_gpu(ring->adev)) {
-               amdgpu_device_gpu_recover(ring->adev, job);
+               amdgpu_device_gpu_recover_imp(ring->adev, job);
        } else {
                drm_sched_suspend_timeout(&ring->sched);
                if (amdgpu_sriov_vf(adev))