]> www.infradead.org Git - users/hch/misc.git/commitdiff
drm/amdgpu: validate userq buffer virtual address and size
authorPrike Liang <Prike.Liang@amd.com>
Mon, 23 Jun 2025 08:29:38 +0000 (16:29 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 15 Sep 2025 20:52:15 +0000 (16:52 -0400)
It needs to validate the userq object's virtual address to
determine whether it is resident in a valid VM mapping.

Signed-off-by: Prike Liang <Prike.Liang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c

index 09cea573e5ad93667fd91b8b87c19cda6a47d7ec..c3c1a714b06abb8cad1987730efea8f85e474ae3 100644 (file)
@@ -44,6 +44,38 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
        return userq_ip_mask;
 }
 
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+                                  u64 expected_size)
+{
+       struct amdgpu_bo_va_mapping *va_map;
+       u64 user_addr;
+       u64 size;
+       int r = 0;
+
+       user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+       size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+       r = amdgpu_bo_reserve(vm->root.bo, false);
+       if (r)
+               return r;
+
+       va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+       if (!va_map) {
+               r = -EINVAL;
+               goto out_err;
+       }
+       /* Only validate the userq whether resident in the VM mapping range */
+       if (user_addr >= va_map->start  &&
+           va_map->last - user_addr + 1 >= size) {
+               amdgpu_bo_unreserve(vm->root.bo);
+               return 0;
+       }
+
+out_err:
+       amdgpu_bo_unreserve(vm->root.bo);
+       return r;
+}
+
 static int
 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
                          struct amdgpu_usermode_queue *queue)
@@ -428,6 +460,14 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
                r = -ENOMEM;
                goto unlock;
        }
+
+       /* Validate the userq virtual address.*/
+       if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
+           amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+           amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+               kfree(queue);
+               goto unlock;
+       }
        queue->doorbell_handle = args->in.doorbell_handle;
        queue->queue_type = args->in.ip_type;
        queue->vm = &fpriv->vm;
index da7e75cda4f83e6615b95c679b9a9f20695a7555..c027dd9166727fd64d46e2b246871f64bf5f7809 100644 (file)
@@ -137,4 +137,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
                                                   u32 idx);
 
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+                                  u64 expected_size);
 #endif
index 66467f41294c19dcb5fe2522cf2384bfb140e329..3a4fd6de08ce7d63fb22744a1070aae811ad4ce4 100644 (file)
@@ -254,6 +254,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
        struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
        struct drm_amdgpu_userq_in *mqd_user = args_in;
        struct amdgpu_mqd_prop *userq_props;
+       struct amdgpu_gfx_shadow_info shadow_info;
        int r;
 
        /* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -279,6 +280,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
        userq_props->doorbell_index = queue->doorbell_index;
        userq_props->fence_address = queue->fence_drv->gpu_addr;
 
+       if (adev->gfx.funcs->get_gfx_shadow_info)
+               adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
        if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
                struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
 
@@ -295,6 +298,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                        goto free_mqd;
                }
 
+               if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
+                   max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE)))
+                       goto free_mqd;
+
                userq_props->eop_gpu_addr = compute_mqd->eop_va;
                userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
                userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@@ -322,6 +329,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                userq_props->csa_addr = mqd_gfx_v11->csa_va;
                userq_props->tmz_queue =
                        mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+               if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
+                   shadow_info.shadow_size))
+                       goto free_mqd;
+
                kfree(mqd_gfx_v11);
        } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
                struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@@ -339,6 +351,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                        goto free_mqd;
                }
 
+               if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
+                   shadow_info.csa_size))
+                       goto free_mqd;
+
                userq_props->csa_addr = mqd_sdma_v11->csa_va;
                kfree(mqd_sdma_v11);
        }