return -ENOMEM;
mutex_lock(&mgr->lock);
- r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
+ r = xa_alloc(&mgr->ctx_handles, id, ctx,
+ XA_LIMIT(0, AMDGPU_VM_MAX_NUM_CTX - 1), GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
return r;
}
- *id = (uint32_t)r;
r = amdgpu_ctx_init(adev, priority, filp, ctx);
if (r) {
- idr_remove(&mgr->ctx_handles, *id);
+ xa_erase(&mgr->ctx_handles, *id);
*id = 0;
kfree(ctx);
}
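
Note the change in calling convention here: idr_alloc() returns the new ID (or a negative errno), while xa_alloc() returns 0 or a negative errno and writes the chosen index through its u32 *id argument, which is why the *id = (uint32_t)r; assignment above is dropped. A minimal sketch of the new pattern, with illustrative names and limits that are not from the patch:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(handles);	/* IDs handed out from 1 upward */

static int store_entry(void *entry, u32 *out_id)
{
	/* On success, the newly allocated index is written to *out_id. */
	return xa_alloc(&handles, out_id, entry,
			XA_LIMIT(1, 1023), GFP_KERNEL);
}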
struct amdgpu_ctx *ctx;
mutex_lock(&mgr->lock);
- ctx = idr_remove(&mgr->ctx_handles, id);
+ ctx = xa_erase(&mgr->ctx_handles, id);
if (ctx)
kref_put(&ctx->refcount, amdgpu_ctx_do_release);
mutex_unlock(&mgr->lock);
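
xa_erase() maps one-to-one onto idr_remove(): both remove the entry at the given index and return the old pointer, or NULL if nothing was stored there, so the kref_put() on a non-NULL result is unchanged. Continuing the illustrative sketch above:

/* assumes the "handles" XArray from the previous sketch */
static void *remove_entry(u32 id)
{
	/* returns whatever was stored at id, or NULL */
	return xa_erase(&handles, id);
}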
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
- ctx = idr_find(&mgr->ctx_handles, id);
+ ctx = xa_load(&mgr->ctx_handles, id);
if (!ctx) {
mutex_unlock(&mgr->lock);
return -EINVAL;

mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
- ctx = idr_find(&mgr->ctx_handles, id);
+ ctx = xa_load(&mgr->ctx_handles, id);
if (!ctx) {
mutex_unlock(&mgr->lock);
return -EINVAL;

mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
- ctx = idr_find(&mgr->ctx_handles, id);
+ ctx = xa_load(&mgr->ctx_handles, id);
if (ctx)
kref_get(&ctx->refcount);
mutex_unlock(&mgr->lock);
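
xa_load() is the lookup half of the conversion, a drop-in replacement for idr_find() that returns the stored pointer or NULL. Continuing the sketch:

/* assumes the "handles" XArray from the sketches above */
static void *lookup_entry(u32 id)
{
	/* xa_load() needs no external locking (it takes the RCU read
	 * lock internally); the driver keeps its mutex only so the
	 * kref can be taken before the pointer escapes the lock. */
	return xa_load(&handles, id);
}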
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);
- idr_init(&mgr->ctx_handles);
+ xa_init_flags(&mgr->ctx_handles, XA_FLAGS_ALLOC1);
}
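
XA_FLAGS_ALLOC1 marks this as an allocating XArray whose IDs start at 1, so index 0 is never handed out; that preserves the old idr_alloc() start value of 1, where a handle of 0 is treated as invalid (note the *id = 0 in the error path above). For an XArray embedded in a structure, as here, the initialisation pattern looks like this (hypothetical types):

#include <linux/mutex.h>
#include <linux/xarray.h>

struct handle_table {
	struct mutex lock;
	struct xarray slots;
};

static void handle_table_init(struct handle_table *t)
{
	mutex_init(&t->lock);
	/* track free slots; never allocate index 0 */
	xa_init_flags(&t->slots, XA_FLAGS_ALLOC1);
}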
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
- struct idr *idp;
- uint32_t id, i;
-
- idp = &mgr->ctx_handles;
+ unsigned long id, i;
mutex_lock(&mgr->lock);
- idr_for_each_entry(idp, ctx, id) {
+ xa_for_each(&mgr->ctx_handles, id, ctx) {
for (i = 0; i < num_entities; i++) {
struct drm_sched_entity *entity;

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
- struct idr *idp;
- uint32_t id, i;
-
- idp = &mgr->ctx_handles;
+ unsigned long id, i;
- idr_for_each_entry(idp, ctx, id) {
+ xa_for_each(&mgr->ctx_handles, id, ctx) {
if (kref_read(&ctx->refcount) != 1) {
DRM_ERROR("ctx %p is still alive\n", ctx);
continue;

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
- struct idr *idp;
- uint32_t id;
+ unsigned long id;
amdgpu_ctx_mgr_entity_fini(mgr);
- idp = &mgr->ctx_handles;
-
- idr_for_each_entry(idp, ctx, id) {
+ xa_for_each(&mgr->ctx_handles, id, ctx) {
if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
DRM_ERROR("ctx %p is still alive\n", ctx);
}
-
- idr_destroy(&mgr->ctx_handles);
+ xa_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
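
Two last details are worth noting. xa_for_each() takes the index variable by name and requires it to be an unsigned long, which is why the uint32_t id declarations above become unsigned long; and xa_destroy() frees only the XArray's internal nodes, never the entries themselves, exactly like idr_destroy(). A standalone teardown sketch, under the assumption that the stored entries were kmalloc'ed:

#include <linux/slab.h>
#include <linux/xarray.h>

static void table_teardown(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	/* visits every non-NULL entry in index order */
	xa_for_each(xa, index, entry)
		kfree(entry);		/* assumption: kmalloc'ed entries */

	/* releases the XArray's internal nodes, not the entries */
	xa_destroy(xa);
}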