#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
-#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
+#include <linux/xarray.h>
#include <linux/cred.h> /* for sg_check_file_access() */
#include "scsi.h"
static int sg_add_device(struct device *, struct class_interface *);
static void sg_remove_device(struct device *, struct class_interface *);
-static DEFINE_IDR(sg_index_idr);
-static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
- file descriptor list for device */
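+/*
+ * All attached sg devices, indexed by minor number. The xarray's internal
+ * spinlock serialises lookups against device removal; XA_FLAGS_ALLOC enables
+ * xa_alloc()-style index allocation and XA_FLAGS_LOCK_IRQ is set because the
+ * lock is taken with interrupts disabled.
+ */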
+static DEFINE_XARRAY_FLAGS(sg_devs, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
static struct class_interface sg_interface = {
.add_dev = sg_add_device,
{
struct request_queue *q = scsidp->request_queue;
Sg_device *sdp;
- unsigned long iflags;
int error;
u32 k;
return ERR_PTR(-ENOMEM);
}
- idr_preload(GFP_KERNEL);
- write_lock_irqsave(&sg_index_lock, iflags);
-
- error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
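+ /*
+ * Keep the xa_lock held across allocation and the initialisation of sdp
+ * below, so a concurrent lookup cannot see a half-initialised device.
+ * __xa_alloc() may drop and retake the lock while allocating memory.
+ */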
+ xa_lock_irq(&sg_devs);
+ error = __xa_alloc(&sg_devs, &k, sdp, XA_LIMIT(0, SG_MAX_DEVS - 1),
+ GFP_KERNEL);
if (error < 0) {
- if (error == -ENOSPC) {
+ if (error == -EBUSY) {
sdev_printk(KERN_WARNING, scsidp,
"Unable to attach sg device type=%d, minor number exceeds %d\n",
scsidp->type, SG_MAX_DEVS - 1);
error = -ENODEV;
} else {
- sdev_printk(KERN_WARNING, scsidp, "%s: idr "
- "allocation Sg_device failure: %d\n",
+ sdev_printk(KERN_WARNING, scsidp,
+ "%s: ID allocation Sg_device failure: %d\n",
__func__, error);
}
goto out_unlock;
}
- k = error;
SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
"sg_alloc: dev=%d \n", k));
error = 0;
out_unlock:
- write_unlock_irqrestore(&sg_index_lock, iflags);
- idr_preload_end();
+ xa_unlock_irq(&sg_devs);
if (error) {
kfree(sdp);
Sg_device *sdp = NULL;
struct cdev * cdev = NULL;
int error;
- unsigned long iflags;
disk = alloc_disk(1);
if (!disk) {
return 0;
cdev_add_err:
- write_lock_irqsave(&sg_index_lock, iflags);
- idr_remove(&sg_index_idr, sdp->index);
- write_unlock_irqrestore(&sg_index_lock, iflags);
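+ /* release the index allocated by sg_alloc() */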
+ xa_erase_irq(&sg_devs, sdp->index);
kfree(sdp);
out:
struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
unsigned long flags;
- /* CAUTION! Note that the device can still be found via idr_find()
- * even though the refcount is 0. Therefore, do idr_remove() BEFORE
+ /* CAUTION! Note that the device can still be found in sg_devs
+ * even though the refcount is 0. Therefore, remove it BEFORE
* any other cleanup.
*/
- write_lock_irqsave(&sg_index_lock, flags);
- idr_remove(&sg_index_idr, sdp->index);
- write_unlock_irqrestore(&sg_index_lock, flags);
+ xa_lock_irqsave(&sg_devs, flags);
+ __xa_erase(&sg_devs, sdp->index);
+ xa_unlock_irqrestore(&sg_devs, flags);
SCSI_LOG_TIMEOUT(3,
sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
sg_sysfs_valid = 0;
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS);
- idr_destroy(&sg_index_idr);
}
static int
}
#ifdef CONFIG_SCSI_PROC_FS
-static int
-sg_idr_max_id(int id, void *p, void *data)
-{
- int *k = data;
-
- if (*k < id)
- *k = id;
-
- return 0;
-}
-
static int
sg_last_dev(void)
{
int k = -1;
- unsigned long iflags;
+ struct sg_device *sdp;
+ unsigned long index;

- read_lock_irqsave(&sg_index_lock, iflags);
- idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
- read_unlock_irqrestore(&sg_index_lock, iflags);
+ /* k ends up as the highest allocated index, or stays -1 if none */
+ xa_for_each(&sg_devs, index, sdp)
+ k = index;
return k + 1; /* origin 1 */
}
#endif
-/* must be called with sg_index_lock held */
+/* must be called with the sg_devs xa_lock held so the result cannot be freed */
static Sg_device *sg_lookup_dev(int dev)
{
- return idr_find(&sg_index_idr, dev);
+ return xa_load(&sg_devs, dev);
}
static Sg_device *
struct sg_device *sdp;
unsigned long flags;
- read_lock_irqsave(&sg_index_lock, flags);
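+ /* hold the xa_lock so the lookup and kref_get are atomic vs. removal */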
+ xa_lock_irqsave(&sg_devs, flags);
sdp = sg_lookup_dev(dev);
if (!sdp)
sdp = ERR_PTR(-ENXIO);
sdp = ERR_PTR(-ENODEV);
} else
kref_get(&sdp->d_ref);
- read_unlock_irqrestore(&sg_index_lock, flags);
+ xa_unlock_irqrestore(&sg_devs, flags);
return sdp;
}
struct scsi_device *scsidp;
unsigned long iflags;
- read_lock_irqsave(&sg_index_lock, iflags);
+ xa_lock_irqsave(&sg_devs, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if ((NULL == sdp) || (NULL == sdp->device) ||
(atomic_read(&sdp->detaching)))
(int) atomic_read(&scsidp->device_busy),
(int) scsi_device_online(scsidp));
}
- read_unlock_irqrestore(&sg_index_lock, iflags);
+ xa_unlock_irqrestore(&sg_devs, iflags);
return 0;
}
struct scsi_device *scsidp;
unsigned long iflags;
- read_lock_irqsave(&sg_index_lock, iflags);
+ xa_lock_irqsave(&sg_devs, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
scsidp = sdp ? sdp->device : NULL;
if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
scsidp->vendor, scsidp->model, scsidp->rev);
else
seq_puts(s, "<no active device>\n");
- read_unlock_irqrestore(&sg_index_lock, iflags);
+ xa_unlock_irqrestore(&sg_devs, iflags);
return 0;
}
-/* must be called while holding sg_index_lock */
+/* must be called while holding the sg_devs xa_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, new_interface, blen, usg;
seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
(int)it->max, sg_big_buff);
- read_lock_irqsave(&sg_index_lock, iflags);
+ xa_lock_irqsave(&sg_devs, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if (NULL == sdp)
goto skip;
}
read_unlock(&sdp->sfd_lock);
skip:
- read_unlock_irqrestore(&sg_index_lock, iflags);
+ xa_unlock_irqrestore(&sg_devs, iflags);
return 0;
}