{
struct amdgpu_bo_list *list;
- mutex_lock(&fpriv->bo_list_lock);
- list = idr_remove(&fpriv->bo_list_handles, id);
- mutex_unlock(&fpriv->bo_list_lock);
+ list = xa_erase(&fpriv->bo_list_handles, id);
if (list)
kref_put(&list->refcount, amdgpu_bo_list_free);
}
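
xa_erase() takes the XArray's internal spinlock itself and returns whatever was stored at the index (or NULL), so the lookup-and-remove no longer needs the external bo_list_lock mutex. A minimal sketch of the erase-then-release idiom; struct handle_obj and the helper names below are placeholders, not driver code:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/* Placeholder standing in for struct amdgpu_bo_list. */
struct handle_obj {
	struct kref refcount;
	struct rcu_head rcu;
};

static void handle_obj_free(struct kref *kref)
{
	struct handle_obj *obj = container_of(kref, struct handle_obj, refcount);

	kfree_rcu(obj, rcu);	/* RCU-deferred free, see the lookup sketch below */
}

/* xa_erase() locks internally and hands back the old entry, NULL if none. */
static void handle_obj_destroy(struct xarray *xa, unsigned long id)
{
	struct handle_obj *obj = xa_erase(xa, id);

	if (obj)
		kref_put(&obj->refcount, handle_obj_free);
}
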
struct amdgpu_bo_list **result)
{
rcu_read_lock();
- *result = idr_find(&fpriv->bo_list_handles, id);
+ *result = xa_load(&fpriv->bo_list_handles, id);
if (*result && kref_get_unless_zero(&(*result)->refcount)) {
rcu_read_unlock();
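
xa_load() takes the RCU read lock internally, so the lookup itself needs no external lock; the explicit rcu_read_lock() here is kept so that kref_get_unless_zero() cannot race with a concurrent erase followed by the object's free, which only works if the object is freed after an RCU grace period (e.g. via kfree_rcu()). A sketch of that lookup-and-get pattern, reusing the placeholder struct handle_obj from above:

/* Returns a referenced object or NULL; caller drops the ref with kref_put(). */
static struct handle_obj *handle_obj_get(struct xarray *xa, unsigned long id)
{
	struct handle_obj *obj;

	rcu_read_lock();
	obj = xa_load(xa, id);
	if (obj && !kref_get_unless_zero(&obj->refcount))
		obj = NULL;	/* found, but already being torn down */
	rcu_read_unlock();

	return obj;
}
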
if (r)
goto error_free;
- mutex_lock(&fpriv->bo_list_lock);
- r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
- mutex_unlock(&fpriv->bo_list_lock);
+ r = xa_alloc(&fpriv->bo_list_handles, &handle, list,
+ xa_limit_31b, GFP_KERNEL);
if (r < 0) {
amdgpu_bo_list_put(list);
return r;
}
- handle = r;
break;
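
xa_alloc() stores the entry at a free index, writes that index into the caller-supplied u32 and returns 0 or a negative errno, which is why the separate `handle = r;` assignment disappears. xa_limit_31b caps the index at INT_MAX, and because the array is initialised with XA_FLAGS_ALLOC1 (see the init hunk further down) index 0 is never handed out, matching the old idr_alloc(..., 1, 0, ...) range. A small sketch using the same placeholder names as above:

/* Allocate a new handle for @obj; on an ALLOC1 array the ids start at 1. */
static int handle_obj_create(struct xarray *xa, struct handle_obj *obj,
			     u32 *handle)
{
	return xa_alloc(xa, handle, obj, xa_limit_31b, GFP_KERNEL);
}
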
case AMDGPU_BO_LIST_OP_DESTROY:
if (r)
goto error_free;
- mutex_lock(&fpriv->bo_list_lock);
- old = idr_replace(&fpriv->bo_list_handles, list, handle);
- mutex_unlock(&fpriv->bo_list_lock);
-
- if (IS_ERR(old)) {
+ old = xa_store(&fpriv->bo_list_handles, handle, list,
+ GFP_KERNEL);
+ if (xa_is_err(old)) {
amdgpu_bo_list_put(list);
- r = PTR_ERR(old);
+ r = xa_err(old);
goto error_free;
}
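
xa_store() returns the previous entry at the index; failures such as -ENOMEM come back encoded in that return value rather than as an ERR_PTR, so they are checked with xa_is_err() and turned back into an errno with xa_err(). One semantic difference worth keeping in mind: idr_replace() fails if nothing is stored at the handle, whereas xa_store() will simply install the new entry and return NULL in that case. A sketch of the replace path, again with the placeholder type (the ref handling here is illustrative, not the driver's exact flow):

/* Swap in @obj at @handle and drop the reference held on the old entry. */
static int handle_obj_update(struct xarray *xa, unsigned long handle,
			     struct handle_obj *obj)
{
	struct handle_obj *old = xa_store(xa, handle, obj, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);	/* e.g. -ENOMEM */

	if (old)
		kref_put(&old->refcount, handle_obj_free);
	return 0;
}
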
goto error_vm;
}
- mutex_init(&fpriv->bo_list_lock);
- idr_init(&fpriv->bo_list_handles);
+ xa_init_flags(&fpriv->bo_list_handles, XA_FLAGS_ALLOC1);
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
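
xa_init_flags() with XA_FLAGS_ALLOC1 makes this an allocating XArray that never hands out index 0, and the structure carries its own spinlock, which is why the separate mutex_init()/mutex_destroy() pair can go. (A statically allocated array would use DEFINE_XARRAY_ALLOC1() instead.) Minimal sketch with a hypothetical container struct:

/* Hypothetical per-open-file state embedding the handle table. */
struct file_state {
	struct xarray handles;
};

static void file_state_init(struct file_state *fs)
{
	xa_init_flags(&fs->handles, XA_FLAGS_ALLOC1);	/* ids allocated from 1 */
}
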
struct amdgpu_bo_list *list;
struct amdgpu_bo *pd;
unsigned int pasid;
- int handle;
+ unsigned long handle;
if (!fpriv)
return;
amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
amdgpu_bo_unref(&pd);
- idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
+ xa_for_each(&fpriv->bo_list_handles, handle, list)
amdgpu_bo_list_put(list);
- idr_destroy(&fpriv->bo_list_handles);
- mutex_destroy(&fpriv->bo_list_lock);
+ xa_destroy(&fpriv->bo_list_handles);
kfree(fpriv);
file_priv->driver_priv = NULL;
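
xa_for_each() iterates with an unsigned long index, which is what forces the type change of `handle`, and it needs no external locking. xa_destroy() only releases the XArray's internal nodes; the stored objects still have to be dropped by the caller, hence the kref_put() loop before it. Teardown sketch with the placeholder names used above:

/* Drop every stored object, then free the XArray's own bookkeeping. */
static void handle_table_fini(struct xarray *xa)
{
	struct handle_obj *obj;
	unsigned long id;

	xa_for_each(xa, id, obj)
		kref_put(&obj->refcount, handle_obj_free);

	xa_destroy(xa);
}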