#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/fs.h>
struct device *dev;
};
-static struct rio_channel *riocm_ch_alloc(u16 ch_num);
+static struct rio_channel *riocm_ch_alloc(u32 ch_num);
static void riocm_ch_free(struct kref *ref);
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
void *buffer, size_t len);
static int riocm_ch_close(struct rio_channel *ch);
-static DEFINE_SPINLOCK(idr_lock);
-static DEFINE_IDR(ch_idr);
+static DEFINE_XARRAY_ALLOC(channels);
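+/* Cursor for cyclic channel ID allocation in riocm_ch_alloc() */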
+static u32 channel_next;
static LIST_HEAD(cm_dev_list);
static DECLARE_RWSEM(rdev_sem);
{
struct rio_channel *ch;
- spin_lock_bh(&idr_lock);
- ch = idr_find(&ch_idr, nr);
+ xa_lock_bh(&channels);
+ ch = xa_load(&channels, nr);
if (ch)
kref_get(&ch->ref);
- spin_unlock_bh(&idr_lock);
+ xa_unlock_bh(&channels);
return ch;
}
riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));
- spin_lock_bh(&idr_lock);
- ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
- if (!ch) {
- spin_unlock_bh(&idr_lock);
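+ /* xa_erase_bh() returns the erased entry, replacing the old find-then-remove sequence */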
+ ch = xa_erase_bh(&channels, ntohs(hh->dst_ch));
+ if (!ch)
return -ENODEV;
- }
- idr_remove(&ch_idr, ch->id);
- spin_unlock_bh(&idr_lock);
riocm_exch(ch, RIO_CM_DISCONNECT);
return new_ch;
err_put_new_ch:
- spin_lock_bh(&idr_lock);
- idr_remove(&ch_idr, new_ch->id);
- spin_unlock_bh(&idr_lock);
+ xa_erase_bh(&channels, new_ch->id);
riocm_put_channel(new_ch);
err_put:
* Return value: pointer to newly created channel object,
* or error-valued pointer
*/
-static struct rio_channel *riocm_ch_alloc(u16 ch_num)
+static struct rio_channel *riocm_ch_alloc(u32 ch_num)
{
- int id;
- int start, end;
+ int err;
struct rio_channel *ch;
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (ch_num) {
/* If requested, try to obtain the specified channel ID */
- start = ch_num;
- end = ch_num + 1;
+ err = xa_insert_bh(&channels, ch_num, ch, GFP_KERNEL);
} else {
/* Obtain channel ID from the dynamic allocation range */
- start = chstart;
- end = RIOCM_MAX_CHNUM + 1;
+ err = xa_alloc_cyclic_bh(&channels, &ch_num, ch,
+ XA_LIMIT(chstart, RIOCM_MAX_CHNUM),
+ &channel_next, GFP_KERNEL);
}
- idr_preload(GFP_KERNEL);
- spin_lock_bh(&idr_lock);
- id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
- spin_unlock_bh(&idr_lock);
- idr_preload_end();
-
- if (id < 0) {
+ if (err < 0) {
kfree(ch);
- return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
+ return ERR_PTR(err);
}
- ch->id = (u16)id;
+ ch->id = ch_num;
ch->state = RIO_CM_IDLE;
spin_lock_init(&ch->lock);
INIT_LIST_HEAD(&ch->accept_queue);
static int riocm_cdev_release(struct inode *inode, struct file *filp)
{
struct rio_channel *ch, *_c;
- unsigned int i;
+ unsigned long i;
LIST_HEAD(list);
riocm_debug(EXIT, "by %s(%d) filp=%p",
current->comm, task_pid_nr(current), filp);
/* Check if there are channels associated with this file descriptor */
- spin_lock_bh(&idr_lock);
- idr_for_each_entry(&ch_idr, ch, i) {
- if (ch && ch->filp == filp) {
+ xa_lock_bh(&channels);
+ xa_for_each(&channels, i, ch) {
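+ /* xa_for_each() only visits present entries, so ch is never NULL here */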
+ if (ch->filp == filp) {
riocm_debug(EXIT, "ch_%d not released by %s(%d)",
ch->id, current->comm,
task_pid_nr(current));
- idr_remove(&ch_idr, ch->id);
+ __xa_erase(&channels, ch->id);
list_add(&ch->ch_node, &list);
}
}
- spin_unlock_bh(&idr_lock);
+ xa_unlock_bh(&channels);
if (!list_empty(&list)) {
list_for_each_entry_safe(ch, _c, &list, ch_node) {
riocm_debug(CHOP, "ch_%d by %s(%d)",
ch_num, current->comm, task_pid_nr(current));
- spin_lock_bh(&idr_lock);
- ch = idr_find(&ch_idr, ch_num);
+ xa_lock_bh(&channels);
+ ch = xa_load(&channels, ch_num);
if (!ch) {
- spin_unlock_bh(&idr_lock);
+ xa_unlock_bh(&channels);
return 0;
}
if (ch->filp != filp) {
- spin_unlock_bh(&idr_lock);
+ xa_unlock_bh(&channels);
return -EINVAL;
}
- idr_remove(&ch_idr, ch->id);
- spin_unlock_bh(&idr_lock);
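+ /* The xa_lock is already held, so use the non-locking __xa_erase() */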
+ __xa_erase(&channels, ch->id);
+ xa_unlock_bh(&channels);
return riocm_ch_close(ch);
}
struct cm_dev *cm;
struct cm_peer *peer;
struct rio_channel *ch, *_c;
- unsigned int i;
+ unsigned long i;
bool found = false;
LIST_HEAD(list);
* Release channels associated with this peer
*/
- spin_lock_bh(&idr_lock);
- idr_for_each_entry(&ch_idr, ch, i) {
- if (ch && ch->rdev == rdev) {
+ xa_lock_bh(&channels);
+ xa_for_each(&channels, i, ch) {
+ if (ch->rdev == rdev) {
if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
riocm_exch(ch, RIO_CM_DISCONNECT);
- idr_remove(&ch_idr, ch->id);
+ __xa_erase(&channels, ch->id);
list_add(&ch->ch_node, &list);
}
}
- spin_unlock_bh(&idr_lock);
+ xa_unlock_bh(&channels);
if (!list_empty(&list)) {
list_for_each_entry_safe(ch, _c, &list, ch_node) {
struct cm_dev *cm;
struct cm_peer *peer, *temp;
struct rio_channel *ch, *_c;
- unsigned int i;
+ unsigned long i;
bool found = false;
LIST_HEAD(list);
destroy_workqueue(cm->rx_wq);
/* Release channels bound to this mport */
- spin_lock_bh(&idr_lock);
- idr_for_each_entry(&ch_idr, ch, i) {
+ xa_lock_bh(&channels);
+ xa_for_each(&channels, i, ch) {
if (ch->cmdev == cm) {
riocm_debug(RDEV, "%s drop ch_%d",
mport->name, ch->id);
- idr_remove(&ch_idr, ch->id);
+ __xa_erase(&channels, ch->id);
list_add(&ch->ch_node, &list);
}
}
- spin_unlock_bh(&idr_lock);
+ xa_unlock_bh(&channels);
if (!list_empty(&list)) {
list_for_each_entry_safe(ch, _c, &list, ch_node) {
void *unused)
{
struct rio_channel *ch;
- unsigned int i;
+ unsigned long i;
LIST_HEAD(list);
riocm_debug(EXIT, ".");
* notification because function riocm_send_close() should
* be called outside of spinlock protected code.
*/
- spin_lock_bh(&idr_lock);
- idr_for_each_entry(&ch_idr, ch, i) {
+ xa_lock_bh(&channels);
+ xa_for_each(&channels, i, ch) {
if (ch->state == RIO_CM_CONNECTED) {
riocm_debug(EXIT, "close ch %d", ch->id);
- idr_remove(&ch_idr, ch->id);
+ __xa_erase(&channels, ch->id);
list_add(&ch->ch_node, &list);
}
}
- spin_unlock_bh(&idr_lock);
+ xa_unlock_bh(&channels);
list_for_each_entry(ch, &list, ch_node)
riocm_send_close(ch);
unregister_reboot_notifier(&rio_cm_notifier);
subsys_interface_unregister(&riocm_interface);
class_interface_unregister(&rio_mport_interface);
- idr_destroy(&ch_idr);
device_unregister(riocm_cdev.dev);
cdev_del(&(riocm_cdev.cdev));