drm/amdgpu: reject gang submit on reserved VMIDs
author     Christian König <christian.koenig@amd.com>
           Fri, 19 Jan 2024 13:57:29 +0000 (14:57 +0100)
committer  Alex Deucher <alexander.deucher@amd.com>
           Mon, 8 Jul 2024 20:50:53 +0000 (16:50 -0400)
A gang submit won't work if the VMID is reserved and we can't flush out
VM changes from multiple engines at the same time.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
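
For context (not part of the patch): userspace obtains a reserved VMID through the AMDGPU_VM ioctl (used e.g. for SPM profiling), while the enforce_isolation module parameter seen in the second hunk reserves the GFX hub VMID globally. Below is a minimal sketch of that ioctl path, assuming libdrm's xf86drm.h wrapper and the amdgpu_drm.h UAPI header; reserve_vmid() is an illustrative name:

    #include <string.h>
    #include <xf86drm.h>        /* drmCommandWriteRead() */
    #include <amdgpu_drm.h>     /* DRM_AMDGPU_VM, union drm_amdgpu_vm */

    static int reserve_vmid(int drm_fd)
    {
            union drm_amdgpu_vm args;

            memset(&args, 0, sizeof(args));
            args.in.op = AMDGPU_VM_OP_RESERVE_VMID;

            /* After this succeeds, a gang submit (gang_size > 1) on this
             * context should fail with -EINVAL on hardware that cannot
             * flush VM changes from multiple engines concurrently. */
            return drmCommandWriteRead(drm_fd, DRM_AMDGPU_VM,
                                       &args, sizeof(args));
    }
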
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ec888fc6ead8df0ce52ec00439e5f22ca7f4e9ff..916b6b8cf7d9a639021f1f2703be6434b60fa4f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1093,6 +1093,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        unsigned int i;
        int r;
 
+       /*
+        * We can't use gang submit with reserved VMIDs when the VM changes
+        * can't be invalidated by more than one engine at the same time.
+        */
+       if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+               for (i = 0; i < p->gang_size; ++i) {
+                       struct drm_sched_entity *entity = p->entities[i];
+                       struct drm_gpu_scheduler *sched = entity->rq->sched;
+                       struct amdgpu_ring *ring = to_amdgpu_ring(sched);
+
+                       if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+                               return -EINVAL;
+               }
+       }
+
        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                return r;
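
The gate above condenses to a single predicate. Purely as a readability sketch (the helper name below is hypothetical, not part of the patch):

    static bool amdgpu_cs_gang_uses_reserved_vmid(struct amdgpu_cs_parser *p,
                                                  struct amdgpu_vm *vm)
    {
            unsigned int i;

            /* One job, or hardware that can flush VM changes from several
             * engines at once, never needs rejecting. */
            if (p->gang_size <= 1 || p->adev->vm_manager.concurrent_flush)
                    return false;

            for (i = 0; i < p->gang_size; ++i) {
                    struct drm_gpu_scheduler *sched = p->entities[i]->rq->sched;

                    /* One engine on a reserved-VMID hub poisons the gang. */
                    if (amdgpu_vmid_uses_reserved(vm, to_amdgpu_ring(sched)->vm_hub))
                            return true;
            }

            return false;
    }
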
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b5b9d4f40f535bb7531cfa83cd84601e12048317..b6a8bddada4c30cb11759b402115a0ad9148ff3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -424,7 +424,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        if (r || !idle)
                goto error;
 
-       if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
+       if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
                r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
                if (r || !id)
                        goto error;
@@ -474,6 +474,19 @@ error:
        return r;
 }
 
+/**
+ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @vm: the VM to check
+ * @vmhub: the VMHUB which will be used
+ *
+ * Returns: True if the VM will use a reserved VMID.
+ */
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+{
+       return vm->reserved_vmid[vmhub] ||
+               (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+}
+
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                               unsigned vmhub)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index fa8c42c83d5d26bc6d90455b5e7b51b627c5b5d7..240fa675126029a0050a44cd4d5065eefcf70bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                               struct amdgpu_vmid *id);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                                unsigned vmhub);
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
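
With the declaration exported above, callers share one definition of "uses a reserved VMID" instead of open-coding the enforce_isolation special case. A minimal usage sketch; can_gang_submit() is illustrative and not in the tree:

    /* Gang submit is safe when the ASIC can flush VM changes from several
     * engines concurrently, or when no reserved VMID is involved. */
    static bool can_gang_submit(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm, unsigned int vmhub)
    {
            return adev->vm_manager.concurrent_flush ||
                   !amdgpu_vmid_uses_reserved(vm, vmhub);
    }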