block: move the io_stat flag setting to queue_limits
author Christoph Hellwig <hch@lst.de>
Mon, 17 Jun 2024 06:04:43 +0000 (08:04 +0200)
committer Jens Axboe <axboe@kernel.dk>
Wed, 19 Jun 2024 13:58:28 +0000 (07:58 -0600)
Move the io_stat flag into the queue_limits feature field so that it can
be set atomically with the queue frozen.
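
As a loose illustration (not from the patch itself; the function name is
made up), a driver can now toggle accounting through the regular limits
update sequence, and the commit applies the new features word atomically:

	static int example_set_io_stat(struct request_queue *q, bool enable)
	{
		struct queue_limits lim;

		/* takes q->limits_lock and snapshots the current limits */
		lim = queue_limits_start_update(q);
		if (enable)
			lim.features |= BLK_FEAT_IO_STAT;
		else
			lim.features &= ~BLK_FEAT_IO_STAT;
		/* validates and publishes the limits, then drops the lock */
		return queue_limits_commit_update(q, &lim);
	}

This mirrors what the QUEUE_SYSFS_FEATURE store helper in block/blk-sysfs.c
does for the "iostats" attribute after this change.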

Simplify md and dm to set the flag unconditionally instead of skipping it
in the cases where it is already set by other means, which was a bit
pointless.
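
For the allocation-time case, a minimal sketch of the unconditional style
for a bio-based driver (variable names are illustrative, not taken from
the patch):

	struct queue_limits lim = {
		.features	= BLK_FEAT_IO_STAT,
	};
	struct gendisk *disk;

	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk))
		return PTR_ERR(disk);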

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20240617060532.127975-17-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk-sysfs.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/md.c
drivers/nvme/host/multipath.c
include/linux/blkdev.h

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 6b7edb50bfd3fa939ecd9d399c0cac629b4b1585..cbe99444ed1a54f56706052c1611e307533b40d4 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -84,7 +84,6 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
-       QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(SYNCHRONOUS),
        QUEUE_FLAG_NAME(SAME_FORCE),
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 58b0d6c7cc34d6a0bbeb78bf491fa2b5609b3936..cf67dc13f7dd4ced1351642d1e41f656cd07a656 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4116,7 +4116,11 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
        struct request_queue *q;
        int ret;
 
-       q = blk_alloc_queue(lim ? lim : &default_lim, set->numa_node);
+       if (!lim)
+               lim = &default_lim;
+       lim->features |= BLK_FEAT_IO_STAT;
+
+       q = blk_alloc_queue(lim, set->numa_node);
        if (IS_ERR(q))
                return q;
        q->queuedata = queuedata;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9174aca3b85526ecfffbdaa1a3fc2945721d39b7..6f58530fb3c08ebfb4dfbd93789661a3e849c35e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -324,7 +324,7 @@ queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
 
 QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
 QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
-QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
+QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
 QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
 #undef QUEUE_SYSFS_BIT_FNS
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0a3838e45affd4de9b1182832a74e1bad390818f..5d5431e531aea99354a9ed7e84c929dfcaa3e3b9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -579,6 +579,12 @@ int dm_split_args(int *argc, char ***argvp, char *input)
        return 0;
 }
 
+static void dm_set_stacking_limits(struct queue_limits *limits)
+{
+       blk_set_stacking_limits(limits);
+       limits->features |= BLK_FEAT_IO_STAT;
+}
+
 /*
  * Impose necessary and sufficient conditions on a devices's table such
  * that any incoming bio which respects its logical_block_size can be
@@ -617,7 +623,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t,
        for (i = 0; i < t->num_targets; i++) {
                ti = dm_table_get_target(t, i);
 
-               blk_set_stacking_limits(&ti_limits);
+               dm_set_stacking_limits(&ti_limits);
 
                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
@@ -1591,7 +1597,7 @@ int dm_calculate_queue_limits(struct dm_table *t,
        unsigned int zone_sectors = 0;
        bool zoned = false;
 
-       blk_set_stacking_limits(limits);
+       dm_set_stacking_limits(limits);
 
        t->integrity_supported = true;
        for (unsigned int i = 0; i < t->num_targets; i++) {
@@ -1604,7 +1610,7 @@ int dm_calculate_queue_limits(struct dm_table *t,
        for (unsigned int i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = dm_table_get_target(t, i);
 
-               blk_set_stacking_limits(&ti_limits);
+               dm_set_stacking_limits(&ti_limits);
 
                if (!ti->type->iterate_devices) {
                        /* Set I/O hints portion of queue limits */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 13037d6a6f62a25ab68295eb4160a4f6cf831161..8a976cee448bed4c454eb5f6fb94aa9463de4ecd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2386,22 +2386,15 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
        struct table_device *td;
        int r;
 
-       switch (type) {
-       case DM_TYPE_REQUEST_BASED:
+       WARN_ON_ONCE(type == DM_TYPE_NONE);
+
+       if (type == DM_TYPE_REQUEST_BASED) {
                md->disk->fops = &dm_rq_blk_dops;
                r = dm_mq_init_request_queue(md, t);
                if (r) {
                        DMERR("Cannot initialize queue for request-based dm mapped device");
                        return r;
                }
-               break;
-       case DM_TYPE_BIO_BASED:
-       case DM_TYPE_DAX_BIO_BASED:
-               blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
-               break;
-       case DM_TYPE_NONE:
-               WARN_ON_ONCE(true);
-               break;
        }
 
        r = dm_calculate_queue_limits(t, &limits);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c23423c51fb7c2c6d29eb89002784ca502268e57..8db0db8d5a27ac96f88ac4c3a5f62f7bd5df5b13 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5787,7 +5787,8 @@ struct mddev *md_alloc(dev_t dev, char *name)
        int unit;
        int error;
        struct queue_limits lim = {
-               .features               = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
+               .features               = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
+                                         BLK_FEAT_IO_STAT,
        };
 
        /*
@@ -6152,8 +6153,6 @@ int md_run(struct mddev *mddev)
        if (!mddev_is_dm(mddev)) {
                struct request_queue *q = mddev->gendisk->queue;
 
-               blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
-
                /* Set the NOWAIT flags if all underlying devices support it */
                if (nowait)
                        blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 58c13304e558e05b2a8532e3cb837a9a3153fafb..eea727cfa9e67de84edce1b15c536b775c7b701b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -538,6 +538,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 
        blk_set_stacking_limits(&lim);
        lim.dma_alignment = 3;
+       lim.features |= BLK_FEAT_IO_STAT;
        if (head->ids.csi != NVME_CSI_ZNS)
                lim.max_zone_append_sectors = 0;
 
@@ -550,7 +551,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
                        ctrl->subsys->instance, head->instance);
 
        blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
-       blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
        /*
         * This assumes all controllers that refer to a namespace either
         * support poll queues or not.  That is not a strict guarantee,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cf1bbf566b2bcd7db144f1325ac95213b37b5fd7..5fafb2f95fd1a3158db16f290a71590c69566265 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -295,6 +295,9 @@ enum {
 
        /* contributes to the random number pool */
        BLK_FEAT_ADD_RANDOM                     = (1u << 3),
+
+       /* do disk/partitions IO accounting */
+       BLK_FEAT_IO_STAT                        = (1u << 4),
 };
 
 /*
@@ -558,7 +561,6 @@ struct request_queue {
 #define QUEUE_FLAG_NOMERGES     3      /* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP   4       /* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO     5       /* fake timeout */
-#define QUEUE_FLAG_IO_STAT     7       /* do disk/partitions IO accounting */
 #define QUEUE_FLAG_NOXMERGES   9       /* No extended merges */
 #define QUEUE_FLAG_SYNCHRONOUS 11      /* always completes in submit context */
 #define QUEUE_FLAG_SAME_FORCE  12      /* force complete on same CPU */
@@ -577,8 +579,7 @@ struct request_queue {
 #define QUEUE_FLAG_SQ_SCHED     30     /* single queue style io dispatch */
 #define QUEUE_FLAG_SKIP_TAGSET_QUIESCE 31 /* quiesce_tagset skip the queue*/
 
-#define QUEUE_FLAG_MQ_DEFAULT  ((1UL << QUEUE_FLAG_IO_STAT) |          \
-                                (1UL << QUEUE_FLAG_SAME_COMP) |        \
+#define QUEUE_FLAG_MQ_DEFAULT  ((1UL << QUEUE_FLAG_SAME_COMP) |        \
                                 (1UL << QUEUE_FLAG_NOWAIT))
 
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
@@ -592,7 +593,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_noxmerges(q) \
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	(!((q)->limits.features & BLK_FEAT_ROTATIONAL))
-#define blk_queue_io_stat(q)   test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_io_stat(q)   ((q)->limits.features & BLK_FEAT_IO_STAT)
 #define blk_queue_zone_resetall(q)     \
        test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
 #define blk_queue_dax(q)       test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
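
Note that consumers are unchanged by this conversion: blk_queue_io_stat()
keeps its name and callers and merely reads the features word out of
queue_limits. A trivial sketch (helper name made up):

	static bool example_should_account(struct gendisk *disk)
	{
		return blk_queue_io_stat(disk->queue);
	}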