drm/amdgpu/userq: add force completion helpers
author    Alex Deucher <alexander.deucher@amd.com>
          Fri, 18 Apr 2025 15:35:49 +0000 (11:35 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
          Fri, 5 Sep 2025 21:38:38 +0000 (17:38 -0400)
Add support for forcing completion of userq fences.
This is needed for userq resets and ASIC resets so that
we can set an error on the fence and force completion.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
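
For illustration, a hedged sketch of how a reset path might use the new
helper. my_userq_reset() is a made-up name, not something added by this
commit; only amdgpu_userq_fence_driver_force_completion() comes from the
patch below.

/*
 * Hypothetical caller, for illustration only.  A queue or ASIC reset
 * means the queue can no longer make forward progress, so cancel its
 * last pending fence rather than leaving waiters blocked until timeout.
 */
static void my_userq_reset(struct amdgpu_usermode_queue *userq)
{
	/* Records -ECANCELED on the pending fence and signals it. */
	amdgpu_userq_fence_driver_force_completion(userq);
}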

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index c2a983ff23c95d0c59709e19f48fed6f5072706d..95e91d1dc58ace86ca89baafd9ddc70fb62ad302 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -67,6 +67,14 @@ static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
        return le64_to_cpu(*fence_drv->cpu_addr);
 }
 
+static void
+amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
+                        u64 seq)
+{
+       if (fence_drv->cpu_addr)
+               *fence_drv->cpu_addr = cpu_to_le64(seq);
+}
+
 int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
                                    struct amdgpu_usermode_queue *userq)
 {
@@ -408,6 +416,40 @@ static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
        dma_fence_put(fence);
 }
 
+static void
+amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
+                                   int error)
+{
+       struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+       unsigned long flags;
+       struct dma_fence *f;
+
+       spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+
+       f = rcu_dereference_protected(&fence->base,
+                                     lockdep_is_held(&fence_drv->fence_list_lock));
+       if (f && !dma_fence_is_signaled_locked(f))
+               dma_fence_set_error(f, error);
+       spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+}
+
+void
+amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
+{
+       struct dma_fence *f = userq->last_fence;
+
+       if (f) {
+               struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+               struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+               u64 wptr = fence->base.seqno;
+
+               amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
+               amdgpu_userq_fence_write(fence_drv, wptr);
+               amdgpu_userq_fence_driver_process(fence_drv);
+
+       }
+}
+
 int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
index 97a125ab8a78696958c45fa8f566e1872f88746f..d76add2afc7740303fe1d8b68088beeccee89da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -67,6 +67,7 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
                                    struct amdgpu_usermode_queue *userq);
 void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
 void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq);
 void amdgpu_userq_fence_driver_destroy(struct kref *ref);
 int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp);
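
For context, a hedged sketch of what a waiter observes once force
completion has run.  dma_fence_wait_timeout() and dma_fence_get_status()
are standard dma-fence API; the surrounding function and the unlocked
access to userq->last_fence are illustrative assumptions, not part of
this commit.

/* Illustration only: observing the forced completion from a waiter. */
static int my_wait_after_force_completion(struct amdgpu_usermode_queue *userq)
{
	struct dma_fence *f = userq->last_fence;
	long r;

	if (!f)
		return 0;

	/*
	 * The helper wrote the fence's seqno to the fence address and
	 * ran amdgpu_userq_fence_driver_process(), so the fence is
	 * already signaled and this returns without blocking.
	 */
	r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(100));
	if (r < 0)
		return r;

	/* Reports the -ECANCELED recorded by the helper. */
	return dma_fence_get_status(f);
}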