amdgpu_bo_unreserve(vm->root.base.bo);
if (pasid) {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
- GFP_ATOMIC);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ r = xa_insert_irq(&adev->vm_manager.vms, pasid, vm, GFP_KERNEL);
if (r < 0)
goto error_free_root;
goto unreserve_bo;
if (pasid) {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
- GFP_ATOMIC);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-
- if (r == -ENOSPC)
+ r = xa_insert_irq(&adev->vm_manager.vms, pasid, vm, GFP_KERNEL);
+ if (r < 0)
goto unreserve_bo;
- r = 0;
}
/* Check if PD needs to be reinitialized and do it before
vm->pte_support_ats = pte_support_ats;
r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
if (r)
- goto free_idr;
+ goto erase;
}
/* Update VM state */
"CPU update of VM recommended only for large BAR system\n");
if (vm->pasid) {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ xa_erase_irq(&adev->vm_manager.vms, vm->pasid);
/* Free the original amdgpu allocated pasid
* Will be replaced with kfd allocated pasid
goto unreserve_bo;
-free_idr:
- if (pasid) {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- idr_remove(&adev->vm_manager.pasid_idr, pasid);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
- }
+erase:
+ if (pasid)
+ xa_erase_irq(&adev->vm_manager.vms, pasid);
unreserve_bo:
amdgpu_bo_unreserve(vm->root.base.bo);
return r;
if (vm->pasid) {
unsigned long flags;
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ xa_lock_irqsave(&adev->vm_manager.vms, flags);
+ __xa_erase(&adev->vm_manager.vms, vm->pasid);
+ xa_unlock_irqrestore(&adev->vm_manager.vms, flags);
}
vm->pasid = 0;
}
if (vm->pasid) {
unsigned long flags;
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ xa_lock_irqsave(&adev->vm_manager.vms, flags);
+ __xa_erase(&adev->vm_manager.vms, vm->pasid);
+ xa_unlock_irqrestore(&adev->vm_manager.vms, flags);
}
drm_sched_entity_destroy(&vm->entity);
adev->vm_manager.vm_update_mode = 0;
#endif
- idr_init(&adev->vm_manager.pasid_idr);
- spin_lock_init(&adev->vm_manager.pasid_lock);
+ xa_init_flags(&adev->vm_manager.vms,
+ XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
adev->vm_manager.xgmi_map_counter = 0;
mutex_init(&adev->vm_manager.lock_pstate);
*/
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
- WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
- idr_destroy(&adev->vm_manager.pasid_idr);
+ /* NOTE(review): no xa_destroy() counterpart is added for the removed
+ * idr_destroy() — confirm an XArray that is empty (as the WARN_ON
+ * asserts) needs no explicit teardown here.
+ */
+ WARN_ON(!xa_empty(&adev->vm_manager.vms));
amdgpu_vmid_mgr_fini(adev);
}
struct amdgpu_vm *vm;
unsigned long flags;
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ xa_lock_irqsave(&adev->vm_manager.vms, flags);
+ vm = xa_load(&adev->vm_manager.vms, pasid);
if (vm)
*task_info = vm->task_info;
-
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ xa_unlock_irqrestore(&adev->vm_manager.vms, flags);
}
/**