* @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders.
* sending a message might require waking up a dozing remote
* processor, which involves sleeping, hence the mutex.
- * @endpoints: idr of local endpoints, allows fast retrieval
- * @endpoints_lock: lock of the endpoints set
+ * @endpoints: xarray of local endpoints, allows fast retrieval
- * @sendq: wait queue of sending contexts waiting for a tx buffers
+ * @sendq: wait queue of sending contexts waiting for a tx buffer
* @sleepers: number of senders that are waiting for a tx buffer
* @ns_ept: the bus's name service endpoint
int last_sbuf;
dma_addr_t bufs_dma;
struct mutex tx_lock;
- struct idr endpoints;
- struct mutex endpoints_lock;
+ struct xarray endpoints;
wait_queue_head_t sendq;
atomic_t sleepers;
struct rpmsg_endpoint *ns_ept;
rpmsg_rx_cb_t cb,
void *priv, u32 addr)
{
- int id_min, id_max, id;
struct rpmsg_endpoint *ept;
struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
+ int err;
ept = kzalloc(sizeof(*ept), GFP_KERNEL);
if (!ept)
ept->priv = priv;
ept->ops = &virtio_endpoint_ops;
- /* do we need to allocate a local address ? */
+ /* bind the endpoint to an rpmsg address (and allocate one if needed) */
if (addr == RPMSG_ADDR_ANY) {
- id_min = RPMSG_RESERVED_ADDRESSES;
- id_max = 0;
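+ /*
+  * Dynamic address: have the xarray pick a free address above the
+  * reserved range (requires the xarray to be initialized with
+  * XA_FLAGS_ALLOC).
+  */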
+ err = xa_alloc(&vrp->endpoints, &ept->addr, ept,
+ XA_LIMIT(RPMSG_RESERVED_ADDRESSES, INT_MAX),
+ GFP_KERNEL);
} else {
- id_min = addr;
- id_max = addr + 1;
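+ /* fixed address: xa_insert() fails with -EBUSY if it is already in use */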
+ ept->addr = addr;
+ err = xa_insert(&vrp->endpoints, addr, ept, GFP_KERNEL);
}
- mutex_lock(&vrp->endpoints_lock);
-
- /* bind the endpoint to an rpmsg address (and allocate one if needed) */
- id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
- if (id < 0) {
- dev_err(dev, "idr_alloc failed: %d\n", id);
- goto free_ept;
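+ /* -EBUSY if the address (or range) is unavailable, -ENOMEM otherwise */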
+ if (err < 0) {
+ dev_err(dev, "address busy: %d\n", err);
+ kref_put(&ept->refcount, __ept_release);
+ ept = NULL;
}
- ept->addr = id;
-
- mutex_unlock(&vrp->endpoints_lock);
return ept;
-
-free_ept:
- mutex_unlock(&vrp->endpoints_lock);
- kref_put(&ept->refcount, __ept_release);
- return NULL;
}
static struct rpmsg_endpoint *virtio_rpmsg_create_ept(struct rpmsg_device *rpdev,
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
/* make sure new inbound messages can't find this ept anymore */
- mutex_lock(&vrp->endpoints_lock);
- idr_remove(&vrp->endpoints, ept->addr);
- mutex_unlock(&vrp->endpoints_lock);
+ xa_erase(&vrp->endpoints, ept->addr);
/* make sure in-flight inbound messages won't invoke cb anymore */
mutex_lock(&ept->cb_lock);
}
/* use the dst addr to fetch the callback of the appropriate user */
- mutex_lock(&vrp->endpoints_lock);
-
- ept = idr_find(&vrp->endpoints, msg->dst);
+ xa_lock(&vrp->endpoints);
+ ept = xa_load(&vrp->endpoints, msg->dst);
/* let's make sure no one deallocates ept while we use it */
if (ept)
kref_get(&ept->refcount);
-
- mutex_unlock(&vrp->endpoints_lock);
+ xa_unlock(&vrp->endpoints);
if (ept) {
/* make sure ept->cb doesn't go away while we use it */
vrp->vdev = vdev;
- idr_init(&vrp->endpoints);
- mutex_init(&vrp->endpoints_lock);
+ xa_init_flags(&vrp->endpoints, XA_FLAGS_ALLOC);
mutex_init(&vrp->tx_lock);
init_waitqueue_head(&vrp->sendq);
if (vrp->ns_ept)
__rpmsg_destroy_ept(vrp, vrp->ns_ept);
- idr_destroy(&vrp->endpoints);
-
vdev->config->del_vqs(vrp->vdev);
dma_free_coherent(vdev->dev.parent, total_buf_space,