drm/amdgpu/gfx8: add ring reset callback for gfx
author    Alex Deucher <alexander.deucher@amd.com>
          Thu, 18 Jul 2024 19:50:23 +0000 (15:50 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
          Fri, 16 Aug 2024 18:24:09 +0000 (14:24 -0400)
Add ring reset callback for gfx.

v2: fix operator precedence (kernel test robot)

Acked-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/vid.h

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index a1963e6c5cab126a168f5cf17b6ed0f12a1f81e3..bc8295812cc8429f9f0a14cc4d015f1e8b447219 100644
@@ -6149,6 +6149,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 {
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+       bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
 
        /* Workaround for cache flush problems. First send a dummy EOP
         * event down the pipe with seq one below.
@@ -6172,7 +6173,8 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
                                 EOP_TC_ACTION_EN |
                                 EOP_TC_WB_ACTION_EN |
                                 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
-                                EVENT_INDEX(5)));
+                                EVENT_INDEX(5) |
+                                (exec ? EOP_EXEC : 0)));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
                          DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
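
The "v2: fix operator precedence" note above likely refers to the parentheses around the conditional in this hunk: in C, '|' binds tighter than '?:', so without them the whole OR chain would silently become the ternary's condition. A standalone illustration (not from the patch):

        /* Unparenthesized, this parses as
         * ((EVENT_INDEX(5) | exec) ? EOP_EXEC : 0),
         * i.e. the OR chain becomes the ternary condition and the other
         * event-control bits are dropped. */
        u32 wrong = EVENT_INDEX(5) | exec ? EOP_EXEC : 0;

        /* Parenthesized as in the patch, the EXEC bit is ORed in only
         * when requested and the rest of the control word is preserved. */
        u32 right = EVENT_INDEX(5) | (exec ? EOP_EXEC : 0);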
@@ -6380,6 +6382,34 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
        amdgpu_ring_write(ring, val);
 }
 
+static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
+                                 int mem_space, int opt, uint32_t addr0,
+                                 uint32_t addr1, uint32_t ref, uint32_t mask,
+                                 uint32_t inv)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring,
+                         /* memory (1) or register (0) */
+                         (WAIT_REG_MEM_MEM_SPACE(mem_space) |
+                          WAIT_REG_MEM_OPERATION(opt) | /* wait */
+                          WAIT_REG_MEM_FUNCTION(3) |  /* equal */
+                          WAIT_REG_MEM_ENGINE(eng_sel)));
+
+       if (mem_space)
+               BUG_ON(addr0 & 0x3); /* Dword align */
+       amdgpu_ring_write(ring, addr0);
+       amdgpu_ring_write(ring, addr1);
+       amdgpu_ring_write(ring, ref);
+       amdgpu_ring_write(ring, mask);
+       amdgpu_ring_write(ring, inv); /* poll interval */
+}
+
+static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+                                       uint32_t val, uint32_t mask)
+{
+       gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
 static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
 {
        struct amdgpu_device *adev = ring->adev;
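
A note on the new helper: gfx_v8_0_wait_reg_mem() emits a WAIT_REG_MEM packet that stalls the ring until (polled value & mask) == ref (FUNCTION(3) selects "equal"), with inv as the poll interval. With mem_space=0 it polls a register, which is how gfx_v8_0_ring_emit_reg_wait() uses it; with mem_space=1 it polls a dword in GPU memory instead, along the lines of this sketch (wb_gpu_addr and expected are illustrative placeholders, not from the patch):

        /* Hypothetical use: wait until a dword-aligned writeback location
         * in GPU memory reads back as 'expected' (see the BUG_ON above
         * for the alignment requirement on memory waits). */
        gfx_v8_0_wait_reg_mem(ring, 0 /* eng_sel */, 1 /* memory */,
                              0 /* wait */,
                              lower_32_bits(wb_gpu_addr),
                              upper_32_bits(wb_gpu_addr),
                              expected, 0xffffffff, 0x20 /* poll interval */);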
@@ -6856,6 +6886,48 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
 
 }
 
+static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_ring *kiq_ring = &kiq->ring;
+       unsigned long flags;
+       u32 tmp;
+       int r;
+
+       if (amdgpu_sriov_vf(adev))
+               return -EINVAL;
+
+       if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+               return -EINVAL;
+
+       spin_lock_irqsave(&kiq->ring_lock, flags);
+
+       if (amdgpu_ring_alloc(kiq_ring, 5)) {
+               spin_unlock_irqrestore(&kiq->ring_lock, flags);
+               return -ENOMEM;
+       }
+
+       tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+       gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
+       amdgpu_ring_commit(kiq_ring);
+
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+       r = amdgpu_ring_test_ring(kiq_ring);
+       if (r)
+               return r;
+
+       if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
+               return -ENOMEM;
+       gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
+                                    ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+       gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
+       gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
+
+       return amdgpu_ring_test_ring(ring);
+}
+
 static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
        .name = "gfx_v8_0",
        .early_init = gfx_v8_0_early_init,
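
For orientation (not part of this diff): gfx_v8_0_reset_kgq() asks the KIQ to latch a reset request for the guilty VMID in CP_VMID_RESET, then on the hung ring emits a trailing fence carrying the new EXEC bit, waits for the RESET_REQUEST bits to read back as zero, clears the register, and finally re-tests the ring. The callback itself is reached from the driver's job-timeout handling, roughly as sketched below (hedged; the exact call site varies across kernel versions):

        /* Sketch of the caller side in the job-timeout path, assuming the
         * per-ring reset plumbing of this era; not taken from the patch. */
        if (amdgpu_gpu_recovery && ring->funcs->reset) {
                r = ring->funcs->reset(ring, job->vmid);
                if (!r)
                        /* per-ring reset worked; no full GPU reset needed */
                        return DRM_GPU_SCHED_STAT_NOMINAL;
        }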
@@ -6923,6 +6995,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
        .soft_recovery = gfx_v8_0_ring_soft_recovery,
        .emit_mem_sync = gfx_v8_0_emit_mem_sync,
+       .reset = gfx_v8_0_reset_kgq,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
drivers/gpu/drm/amd/amdgpu/vid.h
index 80ce42aacc0cce3d7819c8037f27ddf0f9e8aa86..b61f6b838ec2c7931e8caf3c74e285bde53646a6 100644
                 * 1 - Stream
                 * 2 - Bypass
                 */
+#define                EOP_EXEC                                (1 << 28) /* For Trailing Fence */
 #define                DATA_SEL(x)                             ((x) << 29)
                /* 0 - discard
                 * 1 - send low 32bit data