drm/amdgpu: give each kernel job a unique id
author     Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
           Wed, 4 Jun 2025 12:28:23 +0000 (14:28 +0200)
committer  Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
           Mon, 1 Sep 2025 05:19:31 +0000 (10:49 +0530)
Userspace jobs already carry drm_file.client_id as a unique
identifier of the job's owner. For kernel jobs, we can allocate
arbitrary values: the risk of overlap with userspace ids is small,
given that the id is a u64. In the unlikely case an overlap does
happen, it only affects trace events.
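
For example, the amdgpu_ttm map-buffer path (one of the hunks
below) now passes an explicit id at allocation time:

    r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
                                 AMDGPU_FENCE_OWNER_UNDEFINED,
                                 num_dw * 4 + num_bytes,
                                 AMDGPU_IB_POOL_DELAYED, &job,
                                 AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);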

Since this ID is traced in the gpu_scheduler trace events, this
makes it possible to determine the source of each job sent to the
hardware.

To make grepping easier, the IDs are spelled out exactly as they
will appear in the trace output (plain decimal values, counting
down from U64_MAX).
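
A minimal usage sketch, assuming tracefs is mounted in the usual
place and that the id is printed verbatim by the gpu_scheduler
events (the exact trace field layout is not shown here):

    # e.g. match all TTM map-buffer jobs by their decimal id
    grep 18446744073709551611 /sys/kernel/tracing/trace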

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Link: https://lore.kernel.org/r/20250604122827.2191-1-pierre-eric.pelloux-prayer@amd.com
19 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

index c80c8f54353211d96222936588929c809f58015b..98aa99b314c96f9361bfa83894b09b93046f496c 100644 (file)
@@ -1474,7 +1474,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
        owner = (void *)(unsigned long)atomic_inc_return(&counter);
 
        r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
-                                    64, 0, &job);
+                                    64, 0, &job,
+                                    AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
        if (r)
                goto err;
 
index 97b562a79ea8ee04c3b5321ef93117a311160a75..9dcf51991b5b631094ad7ebcfd1987c689774aee 100644 (file)
@@ -690,7 +690,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
-                                    &job);
+                                    &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
        if (r)
                goto error_alloc;
 
index 9b1c551159212847f84b7b67aa80616528abe271..d020a890a0ea42e4e11189f207cdacc721ef29fb 100644 (file)
@@ -209,11 +209,12 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
                             struct drm_sched_entity *entity, void *owner,
                             size_t size, enum amdgpu_ib_pool_type pool_type,
-                            struct amdgpu_job **job)
+                            struct amdgpu_job **job, u64 k_job_id)
 {
        int r;
 
-       r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0);
+       r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
+                            k_job_id);
        if (r)
                return r;
 
index 2f302266662bcb1997ce62a77760a082737189a1..4a6487eb6cb59af54d977e0697a3e0862871c48c 100644 (file)
 struct amdgpu_fence;
 enum amdgpu_ib_pool_type;
 
+/* Internal kernel job ids. (decreasing values, starting from U64_MAX). */
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE              (18446744073709551615ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES         (18446744073709551614ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE        (18446744073709551613ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR            (18446744073709551612ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER         (18446744073709551611ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER        (18446744073709551609ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE       (18446744073709551608ULL)
+#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT              (18446744073709551607ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER       (18446744073709551606ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER         (18446744073709551605ULL)
+#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB          (18446744073709551604ULL)
+#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP           (18446744073709551603ULL)
+#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST          (18446744073709551602ULL)
+
 struct amdgpu_job {
        struct drm_sched_job    base;
        struct amdgpu_vm        *vm;
@@ -96,7 +112,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
                             struct drm_sched_entity *entity, void *owner,
                             size_t size, enum amdgpu_ib_pool_type pool_type,
-                            struct amdgpu_job **job);
+                            struct amdgpu_job **job,
+                            u64 k_job_id);
 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
                              struct amdgpu_bo *gws, struct amdgpu_bo *oa);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
index 82d58ac7afb011b0a4b72cc3b1fed6b9a8dde1f9..4980595bcddd0ae9fd4a51297b922ae81b5257c9 100644 (file)
@@ -194,7 +194,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
-                                    AMDGPU_IB_POOL_DIRECT, &job);
+                                    AMDGPU_IB_POOL_DIRECT, &job,
+                                    AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
index 122a882948839464dc197d40ff8e46cf161f7b42..d18bade9c98f6881d39041caaab4f79692a06a91 100644 (file)
@@ -1313,7 +1313,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
        if (r)
                goto out;
 
-       r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
+       r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
+                              AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
        if (WARN_ON(r))
                goto out;
 
index 27ab4e754b2a9f431d7a5269ebf7f711ed3c06c8..42826504681551385c32639ab3516c3969bbcdf5 100644 (file)
@@ -226,7 +226,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4 + num_bytes,
-                                    AMDGPU_IB_POOL_DELAYED, &job);
+                                    AMDGPU_IB_POOL_DELAYED, &job,
+                                    AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
        if (r)
                return r;
 
@@ -406,7 +407,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                struct dma_fence *wipe_fence = NULL;
 
                r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
-                                      false);
+                                      false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
                if (r) {
                        goto error;
                } else if (wipe_fence) {
@@ -1510,7 +1511,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
-                                    &job);
+                                    &job,
+                                    AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
        if (r)
                goto out;
 
@@ -2167,7 +2169,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
                                  struct dma_resv *resv,
                                  bool vm_needs_flush,
                                  struct amdgpu_job **job,
-                                 bool delayed)
+                                 bool delayed, u64 k_job_id)
 {
        enum amdgpu_ib_pool_type pool = direct_submit ?
                AMDGPU_IB_POOL_DIRECT :
@@ -2177,7 +2179,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
                                                    &adev->mman.high_pr;
        r = amdgpu_job_alloc_with_ib(adev, entity,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
-                                    num_dw * 4, pool, job);
+                                    num_dw * 4, pool, job, k_job_id);
        if (r)
                return r;
 
@@ -2217,7 +2219,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
        r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
-                                  resv, vm_needs_flush, &job, false);
+                                  resv, vm_needs_flush, &job, false,
+                                  AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
        if (r)
                return r;
 
@@ -2252,7 +2255,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
                               uint64_t dst_addr, uint32_t byte_count,
                               struct dma_resv *resv,
                               struct dma_fence **fence,
-                              bool vm_needs_flush, bool delayed)
+                              bool vm_needs_flush, bool delayed,
+                              u64 k_job_id)
 {
        struct amdgpu_device *adev = ring->adev;
        unsigned int num_loops, num_dw;
@@ -2265,7 +2269,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
        num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
        r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
-                                  &job, delayed);
+                                  &job, delayed, k_job_id);
        if (r)
                return r;
 
@@ -2335,7 +2339,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
                        goto err;
 
                r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
-                                       &next, true, true);
+                                       &next, true, true,
+                                       AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
                if (r)
                        goto err;
 
@@ -2354,7 +2359,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        uint32_t src_data,
                        struct dma_resv *resv,
                        struct dma_fence **f,
-                       bool delayed)
+                       bool delayed,
+                       u64 k_job_id)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -2384,7 +2390,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        goto error;
 
                r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
-                                       &next, true, delayed);
+                                       &next, true, delayed, k_job_id);
                if (r)
                        goto error;
 
index 2309df3f68a9cb54464398b5676082a0ca594aad..d82d107fdcc640cf7f2606f10c480d8751f90dfe 100644 (file)
@@ -182,7 +182,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        uint32_t src_data,
                        struct dma_resv *resv,
                        struct dma_fence **fence,
-                       bool delayed);
+                       bool delayed,
+                       u64 k_job_id);
 
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
index 74758b5ffc6c8fb28081e7a5242c247b169a955f..5c38f0d30c87abe4c127cad6ada02e10ca28a5df 100644 (file)
@@ -1136,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
        r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     64, direct ? AMDGPU_IB_POOL_DIRECT :
-                                    AMDGPU_IB_POOL_DELAYED, &job);
+                                    AMDGPU_IB_POOL_DELAYED, &job,
+                                    AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
index b9060bcd48064d659bb930c57ed3730b4328b2f0..ce318f5de047ada814a034164c22596cff38cb72 100644 (file)
@@ -449,7 +449,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
-                                    &job);
+                                    &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
@@ -540,7 +540,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     ib_size_dw * 4,
                                     direct ? AMDGPU_IB_POOL_DIRECT :
-                                    AMDGPU_IB_POOL_DELAYED, &job);
+                                    AMDGPU_IB_POOL_DELAYED, &job,
+                                    AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
index f1f67521c29cab315b2c09bfbcf37d5dda20a88a..d287c44ddbc16acd103b3445c1264ce4a401ff49 100644 (file)
@@ -601,7 +601,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
                                     64, AMDGPU_IB_POOL_DIRECT,
-                                    &job);
+                                    &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                goto err;
 
@@ -781,7 +781,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
-                                    &job);
+                                    &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                goto err;
 
@@ -911,7 +911,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
-                                    &job);
+                                    &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
@@ -978,7 +978,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
-                                    &job);
+                                    &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
index 0b87798daebd31a3c204036cc8c82df1cabc74e5..314f11e6c78b436f4b1ee45aeb52466cdd675014 100644 (file)
@@ -977,7 +977,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
        params.vm = vm;
        params.immediate = immediate;
 
-       r = vm->update_funcs->prepare(&params, NULL);
+       r = vm->update_funcs->prepare(&params, NULL,
+                                     AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
        if (r)
                goto error;
 
@@ -1146,7 +1147,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                dma_fence_put(tmp);
        }
 
-       r = vm->update_funcs->prepare(&params, sync);
+       r = vm->update_funcs->prepare(&params, sync,
+                                     AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
        if (r)
                goto error_free;
 
index fd086efd8457e39bf74555a3eff90fb6cea60df1..ff2c8c6f4d7ccb3440420029319ba383e1cdf70a 100644 (file)
@@ -308,7 +308,7 @@ struct amdgpu_vm_update_params {
 struct amdgpu_vm_update_funcs {
        int (*map_table)(struct amdgpu_bo_vm *bo);
        int (*prepare)(struct amdgpu_vm_update_params *p,
-                      struct amdgpu_sync *sync);
+                      struct amdgpu_sync *sync, u64 k_job_id);
        int (*update)(struct amdgpu_vm_update_params *p,
                      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
                      unsigned count, uint32_t incr, uint64_t flags);
index 0c1ef5850a5eba0d22c0878157048aadad6fd943..22e2e5b4734154cfa961a417f78797e42aa1e992 100644 (file)
@@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
  *
  * @p: see amdgpu_vm_update_params definition
  * @sync: sync obj with fences to wait on
+ * @k_job_id: the id for tracing/debug purposes
  *
  * Returns:
  * Negativ errno, 0 for success.
  */
 static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
-                                struct amdgpu_sync *sync)
+                                struct amdgpu_sync *sync,
+                                u64 k_job_id)
 {
        if (!sync)
                return 0;
index 30022123b0bf6da7e982147f6dead797bad42a47..f794fb1cc06e66d245a6342f83d2d9df5dc03be5 100644 (file)
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_vm.h"
+#include "amdgpu_job.h"
 
 /*
  * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
@@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        params.vm = vm;
        params.immediate = immediate;
 
-       r = vm->update_funcs->prepare(&params, NULL);
+       r = vm->update_funcs->prepare(&params, NULL,
+                                     AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
        if (r)
                goto exit;
 
index 46d9fb433ab2a338e219d924c68af7053726df08..36805dcfa15988f1a57f508d9d39c6c8fbe4dbac 100644 (file)
@@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
 
 /* Allocate a new job for @count PTE updates */
 static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
-                                   unsigned int count)
+                                   unsigned int count, u64 k_job_id)
 {
        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
                : AMDGPU_IB_POOL_DELAYED;
@@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
        ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
 
        r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
-                                    ndw * 4, pool, &p->job);
+                                    ndw * 4, pool, &p->job, k_job_id);
        if (r)
                return r;
 
@@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
  *
  * @p: see amdgpu_vm_update_params definition
  * @sync: amdgpu_sync object with fences to wait for
+ * @k_job_id: identifier of the job, for tracing purpose
  *
  * Returns:
  * Negativ errno, 0 for success.
  */
 static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
-                                 struct amdgpu_sync *sync)
+                                 struct amdgpu_sync *sync, u64 k_job_id)
 {
        int r;
 
-       r = amdgpu_vm_sdma_alloc_job(p, 0);
+       r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
        if (r)
                return r;
 
@@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                        if (r)
                                return r;
 
-                       r = amdgpu_vm_sdma_alloc_job(p, count);
+                       r = amdgpu_vm_sdma_alloc_job(p, count,
+                                                    AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
                        if (r)
                                return r;
                }
index 1c07b701d0e4f16c35fbdde0d1f1d47a67969fad..ceb94bbb03a48fbb87b6acde664d2ecc1b29dddc 100644 (file)
@@ -217,7 +217,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
-                                    AMDGPU_IB_POOL_DIRECT, &job);
+                                    AMDGPU_IB_POOL_DIRECT, &job,
+                                    AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
@@ -281,7 +282,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
-                                    AMDGPU_IB_POOL_DIRECT, &job);
+                                    AMDGPU_IB_POOL_DIRECT, &job,
+                                    AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
index 9d237b5937fb04a5b3c06387a64297b65ae94703..1f8866f3f63c71569d6aaf67dcef709f81f7f977 100644 (file)
@@ -225,7 +225,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
-                                    AMDGPU_IB_POOL_DIRECT, &job);
+                                    AMDGPU_IB_POOL_DIRECT, &job,
+                                    AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
@@ -288,7 +289,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
-                                    AMDGPU_IB_POOL_DIRECT, &job);
+                                    AMDGPU_IB_POOL_DIRECT, &job,
+                                    AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
        if (r)
                return r;
 
index 79251f22b70220550a80937a8c4ad92645a5b35d..683ff02c45afa51d68dd2602c7420b5761744a1a 100644 (file)
@@ -68,7 +68,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4 + num_bytes,
                                     AMDGPU_IB_POOL_DELAYED,
-                                    &job);
+                                    &job,
+                                    AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
        if (r)
                return r;