www.infradead.org Git - users/dwmw2/linux.git/commitdiff
drm/msm: Don't use a worker to capture fault devcoredump
author: Connor Abbott <cwabbott0@gmail.com>
Tue, 20 May 2025 19:08:57 +0000 (15:08 -0400)
committer: Rob Clark <robin.clark@oss.qualcomm.com>
Mon, 9 Jun 2025 18:36:21 +0000 (11:36 -0700)
Now that we use a threaded IRQ, it should be safe to do this in the
fault handler.

We can also remove fault_info from struct msm_gpu and just pass it
directly.

Signed-off-by: Connor Abbott <cwabbott0@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/654889/
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h

index 2348ffb35f7eb73a26da47881901d9111dca1ad9..5ebd5a6ea14390374fda3afbdfc9be8f5f2a910a 100644 (file)
@@ -270,14 +270,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
        const char *type = "UNKNOWN";
        bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
 
-       /*
-        * If we aren't going to be resuming later from fault_worker, then do
-        * it now.
-        */
-       if (!do_devcoredump) {
-               gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
-       }
-
        /*
         * Print a default message if we couldn't get the data from the
         * adreno-smmu-priv
@@ -304,16 +296,18 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
                        scratch[0], scratch[1], scratch[2], scratch[3]);
 
        if (do_devcoredump) {
+               struct msm_gpu_fault_info fault_info = {};
+
                /* Turn off the hangcheck timer to keep it from bothering us */
                timer_delete(&gpu->hangcheck_timer);
 
-               gpu->fault_info.ttbr0 = info->ttbr0;
-               gpu->fault_info.iova  = iova;
-               gpu->fault_info.flags = flags;
-               gpu->fault_info.type  = type;
-               gpu->fault_info.block = block;
+               fault_info.ttbr0 = info->ttbr0;
+               fault_info.iova  = iova;
+               fault_info.flags = flags;
+               fault_info.type  = type;
+               fault_info.block = block;
 
-               kthread_queue_work(gpu->worker, &gpu->fault_work);
+               msm_gpu_fault_crashstate_capture(gpu, &fault_info);
        }
 
        return 0;
index c380d9d9f5af10b90ef733b05f5b0295c0445f38..457f019d507e954daeb609c313d37ee64fd492f9 100644 (file)
@@ -257,7 +257,8 @@ out:
 }
 
 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
-               struct msm_gem_submit *submit, char *comm, char *cmd)
+               struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+               char *comm, char *cmd)
 {
        struct msm_gpu_state *state;
 
@@ -276,7 +277,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
        /* Fill in the additional crash state information */
        state->comm = kstrdup(comm, GFP_KERNEL);
        state->cmd = kstrdup(cmd, GFP_KERNEL);
-       state->fault_info = gpu->fault_info;
+       if (fault_info)
+               state->fault_info = *fault_info;
 
        if (submit) {
                int i;
@@ -308,7 +310,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 }
 #else
 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
-               struct msm_gem_submit *submit, char *comm, char *cmd)
+               struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+               char *comm, char *cmd)
 {
 }
 #endif
@@ -405,7 +408,7 @@ static void recover_worker(struct kthread_work *work)
 
        /* Record the crash state */
        pm_runtime_get_sync(&gpu->pdev->dev);
-       msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+       msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);
 
        kfree(cmd);
        kfree(comm);
@@ -459,9 +462,8 @@ out_unlock:
        msm_gpu_retire(gpu);
 }
 
-static void fault_worker(struct kthread_work *work)
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
 {
-       struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
        struct msm_gem_submit *submit;
        struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
        char *comm = NULL, *cmd = NULL;
@@ -484,16 +486,13 @@ static void fault_worker(struct kthread_work *work)
 
        /* Record the crash state */
        pm_runtime_get_sync(&gpu->pdev->dev);
-       msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+       msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
        pm_runtime_put_sync(&gpu->pdev->dev);
 
        kfree(cmd);
        kfree(comm);
 
 resume_smmu:
-       memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
-       gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
-
        mutex_unlock(&gpu->lock);
 }
 
@@ -882,7 +881,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        init_waitqueue_head(&gpu->retire_event);
        kthread_init_work(&gpu->retire_work, retire_worker);
        kthread_init_work(&gpu->recover_work, recover_worker);
-       kthread_init_work(&gpu->fault_work, fault_worker);
 
        priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
 
index e25009150579c08f7b98d4461a75757d1093734a..bed0692f5adb30e50d0448640a329158d1ffe5e5 100644 (file)
@@ -253,12 +253,6 @@ struct msm_gpu {
 #define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
        struct timer_list hangcheck_timer;
 
-       /* Fault info for most recent iova fault: */
-       struct msm_gpu_fault_info fault_info;
-
-       /* work for handling GPU ioval faults: */
-       struct kthread_work fault_work;
-
        /* work for handling GPU recovery: */
        struct kthread_work recover_work;
 
@@ -705,6 +699,8 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
        mutex_unlock(&gpu->lock);
 }
 
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info);
+
 /*
  * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
  * support expanded privileges