* @req_id: current request id if supplicant is doing synchronous
* communication, else -1
* @reqs: queued requests not yet retrieved by supplicant
- * @idr: IDR holding all requests currently being processed
- * by supplicant
+ * @requests: all requests currently being processed by supplicant
* @reqs_c: completion used by supplicant when waiting for a
* request to be queued.
*/
int req_id;
struct list_head reqs;
- struct idr idr;
+ struct xarray requests;
struct completion reqs_c;
};
memset(supp, 0, sizeof(*supp));
mutex_init(&supp->mutex);
init_completion(&supp->reqs_c);
- idr_init(&supp->idr);
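+ /* XA_FLAGS_ALLOC1 reserves ID 0, so allocated IDs start at 1 like the old idr_alloc(..., 1, 0, ...) */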
+ xa_init_flags(&supp->requests, XA_FLAGS_ALLOC1);
INIT_LIST_HEAD(&supp->reqs);
supp->req_id = -1;
}
void optee_supp_uninit(struct optee_supp *supp)
{
mutex_destroy(&supp->mutex);
- idr_destroy(&supp->idr);
}
void optee_supp_release(struct optee_supp *supp)
{
- int id;
+ unsigned long id;
struct optee_supp_req *req;
struct optee_supp_req *req_tmp;
mutex_lock(&supp->mutex);
/* Abort all requests retrieved by supplicant */
- idr_for_each_entry(&supp->idr, req, id) {
- idr_remove(&supp->idr, id);
+ xa_for_each(&supp->requests, id, req) {
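+ /* Erasing the current entry is safe inside an xa_for_each() walk */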
+ xa_erase(&supp->requests, id);
req->ret = TEEC_ERROR_COMMUNICATION;
complete(&req->c);
}
int num_params, int *id)
{
struct optee_supp_req *req;
+ int ret;
if (supp->req_id != -1) {
/*
return ERR_PTR(-EINVAL);
}
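+ /* With XA_FLAGS_ALLOC1 and xa_limit_31b the allocated ID lands in 1..INT_MAX, so it fits the int pointed to by @id */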
- *id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
- if (*id < 0)
- return ERR_PTR(-ENOMEM);
+ ret = xa_alloc(&supp->requests, id, req, xa_limit_31b, GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
list_del(&req->link);
nm = 0;
}
- req = idr_find(&supp->idr, id);
+ req = xa_load(&supp->requests, id);
if (!req)
return ERR_PTR(-ENOENT);
if ((num_params - nm) != req->num_params)
return ERR_PTR(-EINVAL);
- idr_remove(&supp->idr, id);
+ xa_erase(&supp->requests, id);
supp->req_id = -1;
*num_meta = nm;