rapidio: Convert ch_idr to XArray
author		Matthew Wilcox <willy@infradead.org>
		Tue, 8 Jan 2019 15:05:31 +0000 (10:05 -0500)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
		Fri, 9 Aug 2019 01:38:14 +0000 (21:38 -0400)
Signed-off-by: Matthew Wilcox <willy@infradead.org>
drivers/rapidio/rio_cm.c

index 50ec53d67a4c0ed3e78a261647538d01ada3cd60..e1d29e1ec394d1421bb8ac36983bb889ccbde6a0 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/rio.h>
 #include <linux/rio_drv.h>
 #include <linux/slab.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
 #include <linux/interrupt.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>
@@ -221,14 +221,14 @@ struct channel_dev {
        struct device   *dev;
 };
 
-static struct rio_channel *riocm_ch_alloc(u16 ch_num);
+static struct rio_channel *riocm_ch_alloc(u32 ch_num);
 static void riocm_ch_free(struct kref *ref);
 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
                           void *buffer, size_t len);
 static int riocm_ch_close(struct rio_channel *ch);
 
-static DEFINE_SPINLOCK(idr_lock);
-static DEFINE_IDR(ch_idr);
+static DEFINE_XARRAY_ALLOC(channels);
+static u32 channel_next;
 
 static LIST_HEAD(cm_dev_list);
 static DECLARE_RWSEM(rdev_sem);
@@ -284,11 +284,11 @@ static struct rio_channel *riocm_get_channel(u16 nr)
 {
        struct rio_channel *ch;
 
-       spin_lock_bh(&idr_lock);
-       ch = idr_find(&ch_idr, nr);
+       xa_lock_bh(&channels);
+       ch = xa_load(&channels, nr);
        if (ch)
                kref_get(&ch->ref);
-       spin_unlock_bh(&idr_lock);
+       xa_unlock_bh(&channels);
        return ch;
 }
 
@@ -460,14 +460,9 @@ static int riocm_close_handler(void *data)
 
        riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));
 
-       spin_lock_bh(&idr_lock);
-       ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
-       if (!ch) {
-               spin_unlock_bh(&idr_lock);
+       ch = xa_erase_bh(&channels, ntohs(hh->dst_ch));
+       if (!ch)
                return -ENODEV;
-       }
-       idr_remove(&ch_idr, ch->id);
-       spin_unlock_bh(&idr_lock);
 
        riocm_exch(ch, RIO_CM_DISCONNECT);
 
@@ -1178,9 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
        return new_ch;
 
 err_put_new_ch:
-       spin_lock_bh(&idr_lock);
-       idr_remove(&ch_idr, new_ch->id);
-       spin_unlock_bh(&idr_lock);
+       xa_erase_bh(&channels, new_ch->id);
        riocm_put_channel(new_ch);
 
 err_put:
@@ -1278,10 +1271,9 @@ exit:
  * Return value: pointer to newly created channel object,
  *               or error-valued pointer
  */
-static struct rio_channel *riocm_ch_alloc(u16 ch_num)
+static struct rio_channel *riocm_ch_alloc(u32 ch_num)
 {
-       int id;
-       int start, end;
+       int err;
        struct rio_channel *ch;
 
        ch = kzalloc(sizeof(*ch), GFP_KERNEL);
@@ -1290,26 +1282,20 @@ static struct rio_channel *riocm_ch_alloc(u16 ch_num)
 
        if (ch_num) {
                /* If requested, try to obtain the specified channel ID */
-               start = ch_num;
-               end = ch_num + 1;
+               err = xa_insert_bh(&channels, ch_num, ch, GFP_KERNEL);
        } else {
                /* Obtain channel ID from the dynamic allocation range */
-               start = chstart;
-               end = RIOCM_MAX_CHNUM + 1;
+               err = xa_alloc_cyclic_bh(&channels, &ch_num, ch,
+                               XA_LIMIT(chstart, RIOCM_MAX_CHNUM),
+                               &channel_next, GFP_KERNEL);
        }
 
-       idr_preload(GFP_KERNEL);
-       spin_lock_bh(&idr_lock);
-       id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
-       spin_unlock_bh(&idr_lock);
-       idr_preload_end();
-
-       if (id < 0) {
+       if (err < 0) {
                kfree(ch);
-               return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
+               return ERR_PTR(err);
        }
 
-       ch->id = (u16)id;
+       ch->id = ch_num;
        ch->state = RIO_CM_IDLE;
        spin_lock_init(&ch->lock);
        INIT_LIST_HEAD(&ch->accept_queue);
@@ -1494,24 +1480,24 @@ static int riocm_cdev_open(struct inode *inode, struct file *filp)
 static int riocm_cdev_release(struct inode *inode, struct file *filp)
 {
        struct rio_channel *ch, *_c;
-       unsigned int i;
+       unsigned long i;
        LIST_HEAD(list);
 
        riocm_debug(EXIT, "by %s(%d) filp=%p",
                    current->comm, task_pid_nr(current), filp);
 
        /* Check if there are channels associated with this file descriptor */
-       spin_lock_bh(&idr_lock);
-       idr_for_each_entry(&ch_idr, ch, i) {
-               if (ch && ch->filp == filp) {
+       xa_lock_bh(&channels);
+       xa_for_each(&channels, i, ch) {
+               if (ch->filp == filp) {
                        riocm_debug(EXIT, "ch_%d not released by %s(%d)",
                                    ch->id, current->comm,
                                    task_pid_nr(current));
-                       idr_remove(&ch_idr, ch->id);
+                       __xa_erase(&channels, ch->id);
                        list_add(&ch->ch_node, &list);
                }
        }
-       spin_unlock_bh(&idr_lock);
+       xa_unlock_bh(&channels);
 
        if (!list_empty(&list)) {
                list_for_each_entry_safe(ch, _c, &list, ch_node) {
@@ -1691,18 +1677,18 @@ static int cm_chan_close(struct file *filp, void __user *arg)
        riocm_debug(CHOP, "ch_%d by %s(%d)",
                    ch_num, current->comm, task_pid_nr(current));
 
-       spin_lock_bh(&idr_lock);
-       ch = idr_find(&ch_idr, ch_num);
+       xa_lock_bh(&channels);
+       ch = xa_load(&channels, ch_num);
        if (!ch) {
-               spin_unlock_bh(&idr_lock);
+               xa_unlock_bh(&channels);
                return 0;
        }
        if (ch->filp != filp) {
-               spin_unlock_bh(&idr_lock);
+               xa_unlock_bh(&channels);
                return -EINVAL;
        }
-       idr_remove(&ch_idr, ch->id);
-       spin_unlock_bh(&idr_lock);
+       __xa_erase(&channels, ch->id);
+       xa_unlock_bh(&channels);
 
        return riocm_ch_close(ch);
 }
@@ -1991,7 +1977,7 @@ static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
        struct cm_dev *cm;
        struct cm_peer *peer;
        struct rio_channel *ch, *_c;
-       unsigned int i;
+       unsigned long i;
        bool found = false;
        LIST_HEAD(list);
 
@@ -2037,16 +2023,16 @@ static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
         * Release channels associated with this peer
         */
 
-       spin_lock_bh(&idr_lock);
-       idr_for_each_entry(&ch_idr, ch, i) {
-               if (ch && ch->rdev == rdev) {
+       xa_lock_bh(&channels);
+       xa_for_each(&channels, i, ch) {
+               if (ch->rdev == rdev) {
                        if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
                                riocm_exch(ch, RIO_CM_DISCONNECT);
-                       idr_remove(&ch_idr, ch->id);
+                       __xa_erase(&channels, ch->id);
                        list_add(&ch->ch_node, &list);
                }
        }
-       spin_unlock_bh(&idr_lock);
+       xa_unlock_bh(&channels);
 
        if (!list_empty(&list)) {
                list_for_each_entry_safe(ch, _c, &list, ch_node) {
@@ -2180,7 +2166,7 @@ static void riocm_remove_mport(struct device *dev,
        struct cm_dev *cm;
        struct cm_peer *peer, *temp;
        struct rio_channel *ch, *_c;
-       unsigned int i;
+       unsigned long i;
        bool found = false;
        LIST_HEAD(list);
 
@@ -2203,16 +2189,16 @@ static void riocm_remove_mport(struct device *dev,
        destroy_workqueue(cm->rx_wq);
 
        /* Release channels bound to this mport */
-       spin_lock_bh(&idr_lock);
-       idr_for_each_entry(&ch_idr, ch, i) {
+       xa_lock_bh(&channels);
+       xa_for_each(&channels, i, ch) {
                if (ch->cmdev == cm) {
                        riocm_debug(RDEV, "%s drop ch_%d",
                                    mport->name, ch->id);
-                       idr_remove(&ch_idr, ch->id);
+                       __xa_erase(&channels, ch->id);
                        list_add(&ch->ch_node, &list);
                }
        }
-       spin_unlock_bh(&idr_lock);
+       xa_unlock_bh(&channels);
 
        if (!list_empty(&list)) {
                list_for_each_entry_safe(ch, _c, &list, ch_node) {
@@ -2242,7 +2228,7 @@ static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
        void *unused)
 {
        struct rio_channel *ch;
-       unsigned int i;
+       unsigned long i;
        LIST_HEAD(list);
 
        riocm_debug(EXIT, ".");
@@ -2254,15 +2240,15 @@ static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
         * notification because function riocm_send_close() should
         * be called outside of spinlock protected code.
         */
-       spin_lock_bh(&idr_lock);
-       idr_for_each_entry(&ch_idr, ch, i) {
+       xa_lock_bh(&channels);
+       xa_for_each(&channels, i, ch) {
                if (ch->state == RIO_CM_CONNECTED) {
                        riocm_debug(EXIT, "close ch %d", ch->id);
-                       idr_remove(&ch_idr, ch->id);
+                       __xa_erase(&channels, ch->id);
                        list_add(&ch->ch_node, &list);
                }
        }
-       spin_unlock_bh(&idr_lock);
+       xa_unlock_bh(&channels);
 
        list_for_each_entry(ch, &list, ch_node)
                riocm_send_close(ch);
@@ -2364,7 +2350,6 @@ static void __exit riocm_exit(void)
        unregister_reboot_notifier(&rio_cm_notifier);
        subsys_interface_unregister(&riocm_interface);
        class_interface_unregister(&rio_mport_interface);
-       idr_destroy(&ch_idr);
 
        device_unregister(riocm_cdev.dev);
        cdev_del(&(riocm_cdev.cdev));
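
For reference, a minimal standalone sketch (not part of this commit) of the cyclic-allocation idiom the conversion relies on, using hypothetical names (struct item, items, items_next, item_create, item_destroy). The driver above uses the _bh variants and __xa_erase() where the array is touched under xa_lock_bh(); this sketch shows the plain forms.

/*
 * Sketch only: hand out IDs from the inclusive range [1, 63], resuming
 * after the most recently assigned ID, and look entries up by ID.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct item {
	u32 id;
};

static DEFINE_XARRAY_ALLOC(items);	/* XA_FLAGS_ALLOC: enables xa_alloc*() */
static u32 items_next;			/* cursor for cyclic allocation */

static struct item *item_create(gfp_t gfp)
{
	struct item *it = kzalloc(sizeof(*it), gfp);
	int err;

	if (!it)
		return ERR_PTR(-ENOMEM);

	/* Store 'it' at a free index in [1, 63], starting after items_next */
	err = xa_alloc_cyclic(&items, &it->id, it, XA_LIMIT(1, 63),
			      &items_next, gfp);
	if (err < 0) {		/* a return of 1 just means the cursor wrapped */
		kfree(it);
		return ERR_PTR(err);	/* -EBUSY if the range is full */
	}
	return it;
}

static void item_destroy(struct item *it)
{
	/* xa_erase() takes the xa_lock itself; use __xa_erase() if it is held */
	xa_erase(&items, it->id);
	kfree(it);
}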