dma_fence_put(submit->in_fence);
if (submit->out_fence) {
- /* first remove from IDR, so fence can not be found anymore */
+ /* first remove from the XArray, so the fence cannot be found anymore */
- mutex_lock(&submit->gpu->fence_lock);
- idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
- mutex_unlock(&submit->gpu->fence_lock);
+ xa_erase(&submit->gpu->fences, submit->out_fence_id);
dma_fence_put(submit->out_fence);
}
kfree(submit->pmrs);
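
Note: xa_erase() takes the XArray's internal spinlock itself, which is why
the external fence_lock mutex around the removal can go away. A minimal
sketch of the idea (helper name hypothetical, not part of the patch):

    #include <linux/dma-fence.h>
    #include <linux/xarray.h>

    static struct dma_fence *remove_user_fence(struct xarray *fences, u32 id)
    {
        /* xa_erase() locks internally and returns the entry that was
         * stored at @id, or NULL if there was none.
         */
        return xa_erase(fences, id);
    }
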
* pretends we didn't find a fence in that case.
*/
rcu_read_lock();
- fence = idr_find(&gpu->fence_idr, id);
+ fence = xa_load(&gpu->fences, id);
if (fence)
fence = dma_fence_get_rcu(fence);
rcu_read_unlock();
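
The lookup side pairs xa_load() with RCU: xa_load() may be called under
rcu_read_lock() with no further locking, and dma_fence_get_rcu() returns
NULL if the fence's refcount has already dropped to zero, so the caller
pretends no fence was found. A self-contained sketch of this pattern
(helper name hypothetical):

    #include <linux/dma-fence.h>
    #include <linux/xarray.h>

    static struct dma_fence *lookup_user_fence(struct xarray *fences, u32 id)
    {
        struct dma_fence *fence;

        rcu_read_lock();
        fence = xa_load(fences, id);          /* lock-free, RCU-safe read */
        if (fence)
            fence = dma_fence_get_rcu(fence); /* NULL if already dying */
        rcu_read_unlock();

        return fence;
    }
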
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
- idr_init(&gpu->fence_idr);
+ xa_init_flags(&gpu->fences, XA_FLAGS_ALLOC);
spin_lock_init(&gpu->fence_spinlock);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
}
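
XA_FLAGS_ALLOC is what enables the xa_alloc*() family on this array; IDs
are then allocated starting at 0, matching the old idr_alloc_cyclic()
lower bound (XA_FLAGS_ALLOC1 would start at 1 instead). Sketch:

    struct xarray fences;

    xa_init_flags(&fences, XA_FLAGS_ALLOC); /* required before xa_alloc*() */
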
gpu->drm = NULL;
- idr_destroy(&gpu->fence_idr);
+ xa_destroy(&gpu->fences);
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling);
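
Unlike an IDR, an empty XArray needs no explicit teardown, but calling
xa_destroy() frees any remaining internal nodes and makes the intent
explicit. A sketch of what a teardown helper could look like (name
hypothetical; the WARN_ON is an assumption, not part of the patch):

    static void user_fences_fini(struct xarray *fences)
    {
        /* All fences should have been erased by submit cleanup. */
        WARN_ON(!xa_empty(fences));
        xa_destroy(fences);
    }
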
/* Fencing support */
- struct mutex fence_lock;
- struct idr fence_idr;
+ struct xarray fences;
+ u32 fences_next; /* next fence ID for cyclic XArray allocation */
u32 next_fence;
u32 completed_fence;
wait_queue_head_t fence_event;
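
For orientation, the fencing-related fields after the conversion would look
roughly like this (a sketch assuming the containing struct is the driver's
GPU structure; surrounding members omitted):

    struct etnaviv_gpu {
        /* ... */
        struct xarray fences;   /* user fence id -> struct dma_fence * */
        u32 fences_next;        /* cyclic allocation cursor */
        u32 next_fence;         /* next hardware fence seqno */
        u32 completed_fence;    /* last seqno retired by the GPU */
        wait_queue_head_t fence_event;
        /* ... */
    };
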
goto out_unlock;
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
- submit->out_fence, 0,
- INT_MAX, GFP_KERNEL);
- if (submit->out_fence_id < 0) {
+ ret = xa_alloc_cyclic(&submit->gpu->fences, &submit->out_fence_id,
+ submit->out_fence, xa_limit_31b,
+ &submit->gpu->fences_next, GFP_KERNEL);
+ if (ret < 0) {
drm_sched_job_cleanup(&submit->sched_job);
- ret = -ENOMEM;
goto out_unlock;
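
xa_alloc_cyclic() stores the fence at a free index within the given limit,
starting the search at *fences_next (wrapping around if needed), writes the
chosen index through its second argument, and advances *fences_next. It
returns 0 on success, 1 if the search wrapped, or a negative errno (-EBUSY
or -ENOMEM) on failure, which is why the error branch keys off ret < 0 and
can propagate ret directly instead of forcing -ENOMEM. xa_limit_31b caps
indices at INT_MAX, matching the old idr_alloc_cyclic() upper bound; note
that the id argument is a u32 *, so out_fence_id must be a u32. A sketch
with hypothetical names:

    static int publish_user_fence(struct xarray *fences, u32 *next,
                                  struct dma_fence *fence, u32 *id)
    {
        /* Returns 0, 1 on wraparound, or a negative errno. */
        return xa_alloc_cyclic(fences, id, fence, xa_limit_31b,
                               next, GFP_KERNEL);
    }
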