return 0;
}
-#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
-static const char *const alloc_policy_name[] = {
- BLK_TAG_ALLOC_NAME(FIFO),
- BLK_TAG_ALLOC_NAME(RR),
-};
-#undef BLK_TAG_ALLOC_NAME
-
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
HCTX_FLAG_NAME(STACKING),
HCTX_FLAG_NAME(TAG_HCTX_SHARED),
HCTX_FLAG_NAME(BLOCKING),
+ HCTX_FLAG_NAME(TAG_RR),
HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
};
#undef HCTX_FLAG_NAME
static int hctx_flags_show(void *data, struct seq_file *m)
{
struct blk_mq_hw_ctx *hctx = data;
- const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);
- BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) !=
- BLK_MQ_F_ALLOC_POLICY_START_BIT);
- BUILD_BUG_ON(ARRAY_SIZE(alloc_policy_name) != BLK_TAG_ALLOC_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) != ilog2(BLK_MQ_F_MAX));
- seq_puts(m, "alloc_policy=");
- if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
- alloc_policy_name[alloc_policy])
- seq_puts(m, alloc_policy_name[alloc_policy]);
- else
- seq_printf(m, "%d", alloc_policy);
- seq_puts(m, " ");
- blk_flags_show(m,
- hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
- hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
+ blk_flags_show(m, hctx->flags, hctx_flag_name,
+ ARRAY_SIZE(hctx_flag_name));
seq_puts(m, "\n");
return 0;
}
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
- unsigned int reserved_tags,
- int node, int alloc_policy)
+ unsigned int reserved_tags, unsigned int flags, int node)
{
unsigned int depth = total_tags - reserved_tags;
- bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
+ bool round_robin = flags & BLK_MQ_F_TAG_RR;
struct blk_mq_tags *tags;
if (total_tags > BLK_MQ_TAG_MAX) {
if (node == NUMA_NO_NODE)
node = set->numa_node;
- tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
- BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
+ tags = blk_mq_init_tags(nr_tags, reserved_tags, set->flags, node);
if (!tags)
return NULL;
};
struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
- unsigned int reserved_tags, int node, int alloc_policy);
+ unsigned int reserved_tags, unsigned int flags, int node);
void blk_mq_free_tags(struct blk_mq_tags *tags);
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
.shost_groups = ahci_shost_groups, \
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .tag_alloc_policy_rr = true, \
.device_configure = ata_scsi_device_configure
extern struct ata_port_operations ahci_ops;
.device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static struct ata_port_operations pata_macio_ops = {
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
.device_configure = ata_scsi_device_configure
};
.device_configure = nv_adma_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static const struct scsi_host_template nv_swncq_sht = {
.device_configure = nv_swncq_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
/*
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
- .tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.device_configure = ata_scsi_device_configure
.slave_alloc = hisi_sas_slave_alloc,
.shost_groups = host_v3_hw_groups,
.sdev_groups = sdev_groups_v3_hw,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
.host_reset = hisi_sas_host_reset,
.host_tagset = 1,
.mq_poll = queue_complete_v3_hw,
tag_set->queue_depth = shost->can_queue;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = dev_to_node(shost->dma_dev);
- tag_set->flags |=
- BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
+ if (shost->hostt->tag_alloc_policy_rr)
+ tag_set->flags |= BLK_MQ_F_TAG_RR;
if (shost->queuecommand_may_block)
tag_set->flags |= BLK_MQ_F_BLOCKING;
tag_set->driver_data = shost;
BLK_EH_RESET_TIMER,
};
-/* Keep alloc_policy_name[] in sync with the definitions below */
-enum {
- BLK_TAG_ALLOC_FIFO, /* allocate starting from 0 */
- BLK_TAG_ALLOC_RR, /* allocate starting from last allocated tag */
- BLK_TAG_ALLOC_MAX
-};
-
/**
* struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
* block device
BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
BLK_MQ_F_BLOCKING = 1 << 4,
+ /*
+ * Allocate tags on a round-robin basis instead of the first available one.
+ */
+ BLK_MQ_F_TAG_RR = 1 << 5,
+
/*
* Select 'none' during queue registration in case of a single hwq
* or shared hwqs instead of 'mq-deadline'.
*/
BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6,
- BLK_MQ_F_ALLOC_POLICY_START_BIT = 7,
- BLK_MQ_F_ALLOC_POLICY_BITS = 1,
+
+ BLK_MQ_F_MAX = 1 << 7,
};
-#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
- ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
- ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
-#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
- ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
- << BLK_MQ_F_ALLOC_POLICY_START_BIT)
#define BLK_MQ_MAX_DEPTH (10240)
#define BLK_MQ_NO_HCTX_IDX (-1U)
#define ATA_SUBBASE_SHT(drv_name) \
__ATA_BASE_SHT(drv_name), \
.can_queue = ATA_DEF_QUEUE, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .tag_alloc_policy_rr = true, \
.device_configure = ata_scsi_device_configure
#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
__ATA_BASE_SHT(drv_name), \
.can_queue = drv_qd, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .tag_alloc_policy_rr = true, \
.device_configure = ata_scsi_device_configure
#define ATA_BASE_SHT(drv_name) \
*/
short cmd_per_lun;
- /* If use block layer to manage tags, this is tag allocation policy */
- int tag_alloc_policy;
+ /*
+ * Allocate tags starting from the last allocated tag.
+ */
+ bool tag_alloc_policy_rr : 1;
/*
* Track QUEUE_FULL events and reduce queue depth on demand.