unsigned long *data_bitmap;
struct xarray data_blocks;
- struct idr commands;
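+ /* Lookup of outstanding commands by cmd_id */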
+ struct xarray commands;
struct timer_list cmd_timer;
unsigned int cmd_time_out;
struct tcmu_dev *tcmu_dev;
struct list_head queue_entry;
- uint16_t cmd_id;
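+ /* Widened to 32 bits because xa_alloc() allocates into a u32 * */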
+ uint32_t cmd_id;
/*
 * Can't use se_cmd when cleaning up expired cmds, because if
 * cmd has been completed then accessing se_cmd is off limits.
 */
struct timer_list *timer)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
- int cmd_id;
+ int err;
if (tcmu_cmd->cmd_id)
goto setup_timer;
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
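+ /*
+ * xa_alloc() writes the new id straight into tcmu_cmd->cmd_id and
+ * returns 0 or a negative errno, so no separate cmd_id local is needed.
+ */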
+ err = xa_alloc(&udev->commands, &tcmu_cmd->cmd_id, tcmu_cmd,
+ XA_LIMIT(1, 0xffff), GFP_NOWAIT);
+ if (err < 0) {
pr_err("tcmu: Could not allocate cmd id.\n");
- return cmd_id;
+ return err;
}
- tcmu_cmd->cmd_id = cmd_id;
pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
udev->name, tmo / MSEC_PER_SEC);
}
WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
- cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
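+ /* Like idr_remove(), xa_erase() returns the removed entry or NULL */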
+ cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
if (!cmd) {
pr_err("cmd_id %u not found, ring is broken\n",
entry->hdr.cmd_id);
return handled;
}
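+/* Now a typed helper called from an open-coded xa_for_each() loop */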
-static int tcmu_check_expired_cmd(int id, void *p, void *data)
+static int tcmu_check_expired_cmd(unsigned long id, struct tcmu_cmd *cmd)
{
- struct tcmu_cmd *cmd = p;
struct tcmu_dev *udev = cmd->tcmu_dev;
u8 scsi_status;
struct se_cmd *se_cmd;
list_del_init(&cmd->queue_entry);
} else {
list_del_init(&cmd->queue_entry);
- idr_remove(&udev->commands, id);
+ xa_erase(&udev->commands, id);
tcmu_free_cmd(cmd);
scsi_status = SAM_STAT_TASK_SET_FULL;
}
INIT_LIST_HEAD(&udev->timedout_entry);
INIT_LIST_HEAD(&udev->qfull_queue);
INIT_LIST_HEAD(&udev->inflight_queue);
- idr_init(&udev->commands);
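+ /*
+ * XA_FLAGS_ALLOC1 makes xa_alloc() hand out ids starting at 1, so
+ * cmd_id == 0 still means "no id allocated yet".
+ */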
+ xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
tcmu_cmd->cmd_id, udev->name);
if (fail) {
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+ xa_erase(&udev->commands, tcmu_cmd->cmd_id);
/*
* We were not able to even start the command, so
* fail with busy to allow a retry in case runner
pr_debug("cmd %u on dev %s failed with %u\n",
tcmu_cmd->cmd_id, udev->name, scsi_ret);
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+ xa_erase(&udev->commands, tcmu_cmd->cmd_id);
/*
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
struct se_device *dev = &udev->se_dev;
struct tcmu_cmd *cmd;
bool all_expired = true;
- int i;
+ unsigned long i;
vfree(udev->mb_addr);
udev->mb_addr = NULL;
/* Upper layer should drain all requests before calling this */
mutex_lock(&udev->cmdr_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
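+ /* xa_for_each() visits every present entry; the index must be an unsigned long */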
+ xa_for_each(&udev->commands, i, cmd) {
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
- idr_destroy(&udev->commands);
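+ /* xa_destroy() frees only the XArray's internal nodes, not the entries */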
+ xa_destroy(&udev->commands);
WARN_ON(!all_expired);
tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max);
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
- int i;
+ unsigned long i;
mutex_lock(&udev->cmdr_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
+ xa_for_each(&udev->commands, i, cmd) {
if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
continue;
cmd->cmd_id, udev->name,
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
- idr_remove(&udev->commands, i);
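+ /* Erasing the current entry while inside xa_for_each() is safe */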
+ xa_erase(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
list_del_init(&cmd->queue_entry);
if (err_level == 1) {
{
struct tcmu_dev *udev, *tmp_dev;
LIST_HEAD(devs);
+ struct tcmu_cmd *cmd;
+ unsigned long index;
spin_lock_bh(&timed_out_udevs_lock);
list_splice_init(&timed_out_udevs, &devs);
spin_unlock_bh(&timed_out_udevs_lock);
mutex_lock(&udev->cmdr_lock);
- idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
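+ /* Open-code the old idr_for_each() callback as an xa_for_each() loop */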
+ xa_for_each(&udev->commands, index, cmd)
+ tcmu_check_expired_cmd(index, cmd);
tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);