]> www.infradead.org Git - users/hch/block.git/commitdiff
md: add a mddev_is_dm helper
author Christoph Hellwig <hch@lst.de>
Mon, 26 Feb 2024 14:28:50 +0000 (09:28 -0500)
committer Christoph Hellwig <hch@lst.de>
Mon, 26 Feb 2024 14:53:02 +0000 (09:53 -0500)
Add a helper to check for a DM-mapped MD device instead of using
the obfuscated ->gendisk or ->queue NULL checks.

Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/md/md.c
drivers/md/md.h
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c

index 409e57242b27f63a695ba12b8e5e05e75533c245..01a219b2559bdbd93f7abd96c70a1a440f32d58f 100644 (file)
@@ -2401,7 +2401,7 @@ int md_integrity_register(struct mddev *mddev)
 
        if (list_empty(&mddev->disks))
                return 0; /* nothing to do */
-       if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+       if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk))
                return 0; /* shouldn't register, or already is */
        rdev_for_each(rdev, mddev) {
                /* skip spares and non-functional disks */
@@ -2454,7 +2454,7 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
        struct blk_integrity *bi_mddev;
 
-       if (!mddev->gendisk)
+       if (mddev_is_dm(mddev))
                return 0;
 
        bi_mddev = blk_get_integrity(mddev->gendisk);
@@ -5923,7 +5923,7 @@ int md_run(struct mddev *mddev)
                invalidate_bdev(rdev->bdev);
                if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
                        mddev->ro = MD_RDONLY;
-                       if (mddev->gendisk)
+                       if (!mddev_is_dm(mddev))
                                set_disk_ro(mddev->gendisk, 1);
                }
 
@@ -6082,7 +6082,7 @@ int md_run(struct mddev *mddev)
                }
        }
 
-       if (mddev->queue) {
+       if (!mddev_is_dm(mddev)) {
                bool nonrot = true;
 
                rdev_for_each(rdev, mddev) {
@@ -6338,7 +6338,7 @@ static void mddev_detach(struct mddev *mddev)
                mddev->pers->quiesce(mddev, 0);
        }
        md_unregister_thread(mddev, &mddev->thread);
-       if (mddev->queue)
+       if (!mddev_is_dm(mddev))
                blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 }
 
@@ -7304,10 +7304,9 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
        if (!rv) {
                if (mddev_is_clustered(mddev))
                        md_cluster_ops->update_size(mddev, old_dev_sectors);
-               else if (mddev->queue) {
+               else if (!mddev_is_dm(mddev))
                        set_capacity_and_notify(mddev->gendisk,
                                                mddev->array_sectors);
-               }
        }
        return rv;
 }
@@ -9137,7 +9136,7 @@ void md_do_sync(struct md_thread *thread)
                        mddev->delta_disks > 0 &&
                        mddev->pers->finish_reshape &&
                        mddev->pers->size &&
-                       mddev->queue) {
+                       !mddev_is_dm(mddev)) {
                mddev_lock_nointr(mddev);
                md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
                mddev_unlock(mddev);
index 91ee8951fc8dcb85071dd85975310a2c88c061c1..b08e655f8bec41e690b27ad9c42fe5308ed5ab0f 100644 (file)
@@ -864,16 +864,24 @@ int do_md_run(struct mddev *mddev);
 
 extern const struct block_device_operations md_fops;
 
+/*
+ * MD devices can be used underneath by DM, in which case ->gendisk is NULL.
+ */
+static inline bool mddev_is_dm(struct mddev *mddev)
+{
+       return !mddev->gendisk;
+}
+
 static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
                sector_t sector)
 {
-       if (mddev->gendisk)
+       if (!mddev_is_dm(mddev))
                trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
 }
 
 #define mddev_add_trace_msg(mddev, fmt, args...)                       \
 do {                                                                   \
-       if ((mddev)->gendisk)                                           \
+       if (!mddev_is_dm(mddev))                                        \
                blk_add_trace_msg((mddev)->queue, fmt, ##args);         \
 } while (0)
 
index aff094de97434765c6f5cfb18d03dc9eda559251..9f787ae77ede887611d6cdb8071936c9ec800acf 100644 (file)
@@ -399,7 +399,7 @@ static int raid0_run(struct mddev *mddev)
                mddev->private = conf;
        }
        conf = mddev->private;
-       if (mddev->queue) {
+       if (!mddev_is_dm(mddev)) {
                struct md_rdev *rdev;
 
                blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
index 3f47fe828b21bb05e4ea51bf4a897511bebe8c52..3b1227f67a6d611172bac8f7aca1bf39966af9b3 100644 (file)
@@ -1782,7 +1782,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        for (mirror = first; mirror <= last; mirror++) {
                p = conf->mirrors + mirror;
                if (!p->rdev) {
-                       if (mddev->gendisk)
+                       if (!mddev_is_dm(mddev))
                                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                                  rdev->data_offset << 9);
 
@@ -3109,14 +3109,11 @@ static int raid1_run(struct mddev *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
-       if (mddev->queue)
+       if (!mddev_is_dm(mddev)) {
                blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-
-       rdev_for_each(rdev, mddev) {
-               if (!mddev->gendisk)
-                       continue;
-               disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                 rdev->data_offset << 9);
+               rdev_for_each(rdev, mddev)
+                       disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                         rdev->data_offset << 9);
        }
 
        mddev->degraded = 0;
index b6c5194c22308db066aef5e98c8a2b5f7596b147..95fa9e728f95a953fedcc8ee9bc4d7607826a6d1 100644 (file)
@@ -2124,7 +2124,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        continue;
                }
 
-               if (mddev->gendisk)
+               if (!mddev_is_dm(mddev))
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
 
@@ -2144,7 +2144,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                set_bit(Replacement, &rdev->flags);
                rdev->raid_disk = repl_slot;
                err = 0;
-               if (mddev->gendisk)
+               if (!mddev_is_dm(mddev))
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
                conf->fullsync = 1;
@@ -4040,7 +4040,7 @@ static int raid10_run(struct mddev *mddev)
                }
        }
 
-       if (mddev->queue) {
+       if (!mddev_is_dm(conf->mddev)) {
                blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
                blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
                raid10_set_io_opt(conf);
@@ -4074,7 +4074,7 @@ static int raid10_run(struct mddev *mddev)
                if (first || diff < min_offset_diff)
                        min_offset_diff = diff;
 
-               if (mddev->gendisk)
+               if (!mddev_is_dm(mddev))
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
 
@@ -4959,7 +4959,7 @@ static void end_reshape(struct r10conf *conf)
        conf->reshape_safe = MaxSector;
        spin_unlock_irq(&conf->device_lock);
 
-       if (conf->mddev->queue)
+       if (!mddev_is_dm(conf->mddev))
                raid10_set_io_opt(conf);
        conf->fullsync = 0;
 }
index 969df5c584653e386cbd536ee919d023f87e92ef..287fc1540a8d321b248914819ab5f0618af99ac4 100644 (file)
@@ -2416,12 +2416,12 @@ static int grow_stripes(struct r5conf *conf, int num)
        size_t namelen = sizeof(conf->cache_name[0]);
        int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
-       if (conf->mddev->gendisk)
+       if (mddev_is_dm(conf->mddev))
                snprintf(conf->cache_name[0], namelen,
-                       "raid%d-%s", conf->level, mdname(conf->mddev));
+                       "raid%d-%p", conf->level, conf->mddev);
        else
                snprintf(conf->cache_name[0], namelen,
-                       "raid%d-%p", conf->level, conf->mddev);
+                       "raid%d-%s", conf->level, mdname(conf->mddev));
        snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
 
        conf->active_name = 0;
@@ -4278,11 +4278,10 @@ static int handle_stripe_dirtying(struct r5conf *conf,
                                        set_bit(STRIPE_DELAYED, &sh->state);
                        }
                }
-               if (rcw && conf->mddev->queue)
-                       mddev_add_trace_msg(conf->mddev,
-                               "raid5 rcw %llu %d %d %d",
-                               sh->sector, rcw, qread,
-                               test_bit(STRIPE_DELAYED, &sh->state));
+               if (rcw && !mddev_is_dm(conf->mddev))
+                       blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
+                                         (unsigned long long)sh->sector,
+                                         rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
        }
 
        if (rcw > disks && rmw > disks &&
@@ -5693,7 +5692,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
        }
        release_inactive_stripe_list(conf, cb->temp_inactive_list,
                                     NR_STRIPE_HASH_LOCKS);
-       if (mddev->queue)
+       if (!mddev_is_dm(mddev))
                trace_block_unplug(mddev->queue, cnt, !from_schedule);
        kfree(cb);
 }
@@ -7942,7 +7941,7 @@ static int raid5_run(struct mddev *mddev)
                        mdname(mddev));
        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
-       if (mddev->queue) {
+       if (!mddev_is_dm(mddev)) {
                int chunk_size;
                /* read-ahead size must cover two whole stripes, which
                 * is 2 * (datadisks) * chunksize where 'n' is the
@@ -8546,7 +8545,7 @@ static void end_reshape(struct r5conf *conf)
                spin_unlock_irq(&conf->device_lock);
                wake_up(&conf->wait_for_overlap);
 
-               if (conf->mddev->queue)
+               if (!mddev_is_dm(conf->mddev))
                        raid5_set_io_opt(conf);
        }
 }