u64 lun)
{
struct scsi_device *sdev;
+ unsigned long index;
- list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
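+ /* walk the target's XArray of devices instead of the old per-target list */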
+ xa_for_each(&starget->devices, index, sdev) {
if (sdev->sdev_state == SDEV_DEL)
continue;
- if (sdev->lun ==lun)
+ if (sdev->lun == lun)
return sdev;
}
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
struct Scsi_Host *shost = current_sdev->host;
- struct scsi_device *sdev, *tmp;
+ struct scsi_device *sdev;
struct scsi_target *starget = scsi_target(current_sdev);
+ unsigned long index;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
if (starget->starget_sdev_user)
goto out;
- list_for_each_entry_safe(sdev, tmp, &starget->devices,
- same_target_siblings) {
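+ /* xa_for_each() tolerates concurrent removal, so no _safe iterator is needed */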
+ xa_for_each(&starget->devices, index, sdev) {
if (sdev == current_sdev)
continue;
if (scsi_device_get(sdev))
mutex_init(&sdev->state_mutex);
sdev->sdev_state = SDEV_CREATED;
INIT_LIST_HEAD(&sdev->siblings);
- INIT_LIST_HEAD(&sdev->same_target_siblings);
INIT_LIST_HEAD(&sdev->cmd_list);
INIT_LIST_HEAD(&sdev->starved_entry);
INIT_LIST_HEAD(&sdev->event_list);
starget->channel = channel;
starget->can_queue = 0;
INIT_LIST_HEAD(&starget->siblings);
- INIT_LIST_HEAD(&starget->devices);
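+ /* XA_FLAGS_ALLOC is required for xa_alloc() to hand out pertarget_id slots */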
+ xa_init_flags(&starget->devices, XA_FLAGS_ALLOC);
starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
spin_lock_irqsave(sdev->host->host_lock, flags);
list_del(&sdev->siblings);
- list_del(&sdev->same_target_siblings);
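+ /* release this device's slot in the target's XArray */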
+ xa_erase(&sdev->sdev_target->devices, sdev->pertarget_id);
list_del(&sdev->starved_entry);
spin_unlock_irqrestore(sdev->host->host_lock, flags);
{
unsigned long flags;
struct Scsi_Host *shost = sdev->host;
struct scsi_target *starget = sdev->sdev_target;
device_initialize(&sdev->sdev_gendev);
sdev->sdev_gendev.bus = &scsi_bus_type;
sdev->lun_in_cdb = 1;
transport_setup_device(&sdev->sdev_gendev);
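+ /* reserve a slot in the target's XArray; pertarget_id is kept so the release path can xa_erase() it */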
+ BUG_ON(xa_alloc(&starget->devices, &sdev->pertarget_id, sdev,
+ xa_limit_32b, GFP_ATOMIC));
spin_lock_irqsave(shost->host_lock, flags);
- list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
struct Scsi_Host *host;
struct request_queue *request_queue;
- /* the next two are protected by the host->host_lock */
+ /* protected by the host->host_lock */
struct list_head siblings; /* list of all devices on this host */
- struct list_head same_target_siblings; /* just the devices sharing same target id */
+ u32 pertarget_id; /* index into the target's devices xarray */
atomic_t device_busy; /* commands actually active on LLDD */
atomic_t device_blocked; /* Device returned QUEUE_FULL. */
struct scsi_target {
struct scsi_device *starget_sdev_user;
struct list_head siblings;
- struct list_head devices;
+ struct xarray devices;
struct device dev;
struct kref reap_ref; /* last put renders target invisible */
unsigned int channel;