drm/amdgpu: revert "rework reserved VMID handling" v2
author Christian König <christian.koenig@amd.com>
Fri, 19 Sep 2025 07:27:03 +0000 (09:27 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 25 Sep 2025 19:39:00 +0000 (15:39 -0400)
This reverts commit e44a0fe630c58b0a87d8281f5c1077a3479e5fce.

Initially we used VMID reservation to enforce isolation between
processes. That has now been replaced by proper fence handling.

OpenGL, RADV and ROCm developers have all requested a way to reserve a
VMID for SPM, so restore that approach by reverting to only allowing a
single process to use the reserved VMID.

Only compile tested for now.

v2: use -ENOENT instead of -EINVAL if VMID is not available

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
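
For context, userspace reaches this functionality through the existing
DRM_IOCTL_AMDGPU_VM ioctl, which the amdgpu_vm_ioctl() hunk further down
dispatches. Below is a minimal sketch, assuming a render node at
/dev/dri/renderD128 (an illustrative path) and the kernel UAPI header
location, of how a profiling tool might claim and later release the
reserved VMID:

/*
 * Hedged sketch: claim the reserved VMID for SPM via the amdgpu VM ioctl.
 * The device node path is illustrative and the include path depends on
 * where the libdrm/kernel UAPI headers are installed.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/amdgpu_drm.h>

int main(void)
{
	union drm_amdgpu_vm args;
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0)
		return 1;

	/* in.flags must stay zero: no valid flags are defined yet */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args) < 0)
		/* kernel-side, a second claimant is now refused with -ENOENT */
		fprintf(stderr, "reserve failed: %s\n", strerror(errno));

	/* ... configure SPM and capture counters here ... */

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
	ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args);

	close(fd);
	return 0;
}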
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index cbdf108612d2c3a2c4a857e01b4ee125a52f765c..3ef5bc95642cab58d882f109badaa5ddc8368be9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -275,13 +275,12 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 {
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->vm_hub;
-       struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        uint64_t fence_context = adev->fence_context + ring->idx;
        bool needs_flush = vm->use_cpu_for_update;
        uint64_t updates = amdgpu_vm_tlb_seq(vm);
        int r;
 
-       *id = id_mgr->reserved;
+       *id = vm->reserved_vmid[vmhub];
        if ((*id)->owner != vm->immediate.fence_context ||
            !amdgpu_vmid_compatible(*id, job) ||
            (*id)->flushed_updates < updates ||
@@ -474,40 +473,61 @@ bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
        return vm->reserved_vmid[vmhub];
 }
 
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
+ * @adev: amdgpu device structure
+ * @vm: the VM to reserve an ID for
+ * @vmhub: the VMHUB which should be used
+ *
+ * Mostly used to have a reserved VMID for debugging and SPM.
+ *
+ * Returns: 0 for success, -ENOENT if an ID is already reserved.
+ */
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                               unsigned vmhub)
 {
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+       struct amdgpu_vmid *id;
+       int r = 0;
 
        mutex_lock(&id_mgr->lock);
-
-       ++id_mgr->reserved_use_count;
-       if (!id_mgr->reserved) {
-               struct amdgpu_vmid *id;
-
-               id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
-                                     list);
-               /* Remove from normal round robin handling */
-               list_del_init(&id->list);
-               id_mgr->reserved = id;
+       if (vm->reserved_vmid[vmhub])
+               goto unlock;
+       if (id_mgr->reserved_vmid) {
+               r = -ENOENT;
+               goto unlock;
        }
-
+       /* Remove from normal round robin handling */
+       id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+       list_del_init(&id->list);
+       vm->reserved_vmid[vmhub] = id;
+       id_mgr->reserved_vmid = true;
        mutex_unlock(&id_mgr->lock);
+
        return 0;
+unlock:
+       mutex_unlock(&id_mgr->lock);
+       return r;
 }
 
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_free_reserved - free up a reserved VMID again
+ * @adev: amdgpu device structure
+ * @vm: the VM with the reserved ID
+ * @vmhub: the VMHUB which should be used
+ */
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                               unsigned vmhub)
 {
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
        mutex_lock(&id_mgr->lock);
-       if (!--id_mgr->reserved_use_count) {
-               /* give the reserved ID back to normal round robin */
-               list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
-               id_mgr->reserved = NULL;
+       if (vm->reserved_vmid[vmhub]) {
+               list_add(&vm->reserved_vmid[vmhub]->list,
+                       &id_mgr->ids_lru);
+               vm->reserved_vmid[vmhub] = NULL;
+               id_mgr->reserved_vmid = false;
        }
-
        mutex_unlock(&id_mgr->lock);
 }
 
@@ -574,7 +594,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 
                mutex_init(&id_mgr->lock);
                INIT_LIST_HEAD(&id_mgr->ids_lru);
-               id_mgr->reserved_use_count = 0;
 
                /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
                if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
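
Taken together, the restored logic is a first-claimant-wins pattern: one
reserved_vmid flag per ID manager plus a per-VM pointer, both serialized
by the manager lock. The standalone model below uses hypothetical types
(it is not driver code) to capture the same invariant, including the
idempotent-success path when a VM re-reserves an ID it already holds:

/* Standalone model of the restored semantics: at most one VM may own the
 * reserved ID per hub.  Hypothetical types, not the amdgpu structures.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct id_mgr {
	pthread_mutex_t lock;
	bool reserved;			/* mirrors id_mgr->reserved_vmid */
};

struct vm {
	void *reserved_id;		/* mirrors vm->reserved_vmid[vmhub] */
};

static int reserve(struct id_mgr *mgr, struct vm *vm, void *id)
{
	int r = 0;

	pthread_mutex_lock(&mgr->lock);
	if (vm->reserved_id)
		goto unlock;		/* already ours: idempotent success */
	if (mgr->reserved) {
		r = -ENOENT;		/* another VM holds the reserved ID */
		goto unlock;
	}
	vm->reserved_id = id;		/* claim it for this VM alone */
	mgr->reserved = true;
unlock:
	pthread_mutex_unlock(&mgr->lock);
	return r;
}

static void unreserve(struct id_mgr *mgr, struct vm *vm)
{
	pthread_mutex_lock(&mgr->lock);
	if (vm->reserved_id) {
		vm->reserved_id = NULL;	/* ID returns to round-robin use */
		mgr->reserved = false;
	}
	pthread_mutex_unlock(&mgr->lock);
}

Any other claimant keeps seeing -ENOENT until the owner frees the ID,
which is what the v2 note about -ENOENT versus -EINVAL refers to.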
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 240fa675126029a0050a44cd4d5065eefcf70bef..b3649cd3af5699ef7331eedee54b042cdcdd99d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -67,8 +67,7 @@ struct amdgpu_vmid_mgr {
        unsigned                num_ids;
        struct list_head        ids_lru;
        struct amdgpu_vmid      ids[AMDGPU_NUM_VMID];
-       struct amdgpu_vmid      *reserved;
-       unsigned int            reserved_use_count;
+       bool                    reserved_vmid;
 };
 
 int amdgpu_pasid_alloc(unsigned int bits);
@@ -79,10 +78,10 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                               struct amdgpu_vmid *id);
 bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
-                               unsigned vmhub);
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
-                               unsigned vmhub);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                              unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                              unsigned vmhub);
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                     struct amdgpu_job *job, struct dma_fence **fence);
 void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 112ce584a5ada167dd6a997e01c38a26452a508a..8c28e8923f02538e342140e669d962c4e3a1fade 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2790,10 +2790,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        dma_fence_put(vm->last_update);
 
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
-               if (vm->reserved_vmid[i]) {
-                       amdgpu_vmid_free_reserved(adev, i);
-                       vm->reserved_vmid[i] = false;
-               }
+               amdgpu_vmid_free_reserved(adev, vm, i);
        }
 
        ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
@@ -2889,6 +2886,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        union drm_amdgpu_vm *args = data;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       struct amdgpu_vm *vm = &fpriv->vm;
 
        /* No valid flags defined yet */
        if (args->in.flags)
@@ -2897,17 +2895,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        switch (args->in.op) {
        case AMDGPU_VM_OP_RESERVE_VMID:
                /* We only have requirement to reserve vmid from gfxhub */
-               if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
-                       amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
-                       fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
-               }
-
+               amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
                break;
        case AMDGPU_VM_OP_UNRESERVE_VMID:
-               if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
-                       amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
-                       fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
-               }
+               amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
                break;
        default:
                return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 988e970d9e9604c2c2fcac9f191c9f3bc823fd71..adc5c9161fa84adfdb03f9329f1904311885e621 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -415,7 +415,7 @@ struct amdgpu_vm {
        struct dma_fence        *last_unlocked;
 
        unsigned int            pasid;
-       bool                    reserved_vmid[AMDGPU_MAX_VMHUBS];
+       struct amdgpu_vmid      *reserved_vmid[AMDGPU_MAX_VMHUBS];
 
        /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
        bool                                    use_cpu_for_update;