drm/amdgpu: revert "take runtime pm reference when we attach a buffer" v2
author	Christian König <christian.koenig@amd.com>
	Wed, 5 Jun 2024 11:27:20 +0000 (13:27 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
	Wed, 19 Jun 2024 18:17:25 +0000 (14:17 -0400)
This reverts commit b8c415e3bf98 ("drm/amdgpu: take runtime pm reference
when we attach a buffer") and commit 425285d39afd ("drm/amdgpu: add amdgpu
runpm usage trace for separate funcs").

Taking a runtime pm reference for DMA-buf attachments is completely
unnecessary and even dangerous.

The problem is that calling pm_runtime_get_sync() from the DMA-buf
callbacks is illegal: the reservation lock is held at this point and
the same lock is also taken during resume, so this would deadlock.
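
As a rough, self-contained sketch of that lock inversion (the toy_*
names below are invented for illustration and are not amdgpu code):

#include <linux/device.h>
#include <linux/dma-resv.h>
#include <linux/pm_runtime.h>

struct toy_buffer {
	struct dma_resv resv;	/* the buffer's reservation object */
	struct device *dev;	/* the exporting device */
};

/*
 * Path 1: an attach callback. The DMA-buf core calls it with
 * buf->resv already locked.
 */
static int toy_attach(struct toy_buffer *buf)
{
	dma_resv_assert_held(&buf->resv);

	/*
	 * If the device is suspended this synchronously runs the
	 * runtime-resume handler, i.e. path 2 below.
	 */
	return pm_runtime_get_sync(buf->dev);
}

/*
 * Path 2: runtime resume. Restoring the buffer needs the very
 * reservation lock that path 1 is still holding, so the resume
 * never completes and path 1 never returns.
 */
static int toy_runtime_resume(struct toy_buffer *buf)
{
	int r = dma_resv_lock(&buf->resv, NULL);

	if (r)
		return r;
	/* ... migrate/restore buffer contents ... */
	dma_resv_unlock(&buf->resv);
	return 0;
}

Which is why the hunks below simply drop the pm_runtime_* calls from
the attach/detach path instead of trying to reorder the locking.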

When the buffer is in GTT it is still accessible even while the GPU
is powered down, and when it is in VRAM the buffer is migrated to GTT
before powering down.

The only use case that would make it mandatory to keep the runtime
pm reference would be pinning the buffer into VRAM, and that is not
something we currently do.

v2: improve the commit message

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
CC: stable@vger.kernel.org
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 055ba2ea4c126f621890b56c56ef86672e63b0c3..662d0f28f3587660ba086af9b95263c5db850fcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -41,8 +41,6 @@
 #include <linux/dma-buf.h>
 #include <linux/dma-fence-array.h>
 #include <linux/pci-p2pdma.h>
-#include <linux/pm_runtime.h>
-#include "amdgpu_trace.h"
 
 /**
  * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
@@ -58,42 +56,11 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
        struct drm_gem_object *obj = dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       int r;
 
        if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
                attach->peer2peer = false;
 
-       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-       trace_amdgpu_runpm_reference_dumps(1, __func__);
-       if (r < 0)
-               goto out;
-
        return 0;
-
-out:
-       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-       trace_amdgpu_runpm_reference_dumps(0, __func__);
-       return r;
-}
-
-/**
- * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
- *
- * @dmabuf: DMA-buf where we remove the attachment from
- * @attach: the attachment to remove
- *
- * Called when an attachment is removed from the DMA-buf.
- */
-static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
-                                 struct dma_buf_attachment *attach)
-{
-       struct drm_gem_object *obj = dmabuf->priv;
-       struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-
-       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-       trace_amdgpu_runpm_reference_dumps(0, __func__);
 }
 
 /**
@@ -267,7 +234,6 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 
 const struct dma_buf_ops amdgpu_dmabuf_ops = {
        .attach = amdgpu_dma_buf_attach,
-       .detach = amdgpu_dma_buf_detach,
        .pin = amdgpu_dma_buf_pin,
        .unpin = amdgpu_dma_buf_unpin,
        .map_dma_buf = amdgpu_dma_buf_map,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 10832b4704484b9d308beb47375240942dd93a0f..bc3ac73b6b8d0c710fb436312fb1e558bde5aae6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -181,7 +181,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
-       trace_amdgpu_runpm_reference_dumps(1, __func__);
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        if (unlikely(rcu_dereference_protected(*ptr, 1))) {
                struct dma_fence *old;
@@ -309,7 +308,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
                dma_fence_put(fence);
                pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               trace_amdgpu_runpm_reference_dumps(0, __func__);
        } while (last_seq != seq);
 
        return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 7aafeb763e5dde1d51d6b0648cd8195ff9b57c07..383fce40d4dd7e549f3ef0179afef6b87d1f25d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -554,21 +554,6 @@ TRACE_EVENT(amdgpu_reset_reg_dumps,
                      __entry->value)
 );
 
-TRACE_EVENT(amdgpu_runpm_reference_dumps,
-           TP_PROTO(uint32_t index, const char *func),
-           TP_ARGS(index, func),
-           TP_STRUCT__entry(
-                            __field(uint32_t, index)
-                            __string(func, func)
-                            ),
-           TP_fast_assign(
-                          __entry->index = index;
-                          __assign_str(func);
-                          ),
-           TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n",
-                     __entry->index,
-                     __get_str(func))
-);
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif