www.infradead.org Git - users/hch/block.git/commitdiff
md: remove mddev->queue
authorChristoph Hellwig <hch@lst.de>
Mon, 26 Feb 2024 14:30:13 +0000 (09:30 -0500)
committerChristoph Hellwig <hch@lst.de>
Mon, 26 Feb 2024 16:21:07 +0000 (11:21 -0500)
Just use the request_queue from the gendisk pointer in the relatively
few places that still need it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/md/md.c
drivers/md/md.h
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c

index 3acf4ae986d8877ace72caa74b1faa1750301ed1..832975aa8faf4b48512b19fe7646570c19752454 100644 (file)
@@ -5716,10 +5716,10 @@ int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
        if (mddev_is_dm(mddev))
                return 0;
 
-       lim = queue_limits_start_update(mddev->queue);
+       lim = queue_limits_start_update(mddev->gendisk->queue);
        queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
                                mddev->gendisk->disk_name);
-       return queue_limits_commit_update(mddev->queue, &lim);
+       return queue_limits_commit_update(mddev->gendisk->queue, &lim);
 }
 EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
 
@@ -5731,11 +5731,11 @@ void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes)
        if (mddev_is_dm(mddev))
                return;
 
-       blk_mq_freeze_queue(mddev->queue);
-       lim = queue_limits_start_update(mddev->queue);
+       blk_mq_freeze_queue(mddev->gendisk->queue);
+       lim = queue_limits_start_update(mddev->gendisk->queue);
        lim.io_opt = lim.io_min * nr_stripes;
-       queue_limits_commit_update(mddev->queue, &lim);
-       blk_mq_unfreeze_queue(mddev->queue);
+       queue_limits_commit_update(mddev->gendisk->queue, &lim);
+       blk_mq_unfreeze_queue(mddev->gendisk->queue);
 }
 EXPORT_SYMBOL_GPL(mddev_update_io_opt);
 
@@ -5821,8 +5821,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
        disk->fops = &md_fops;
        disk->private_data = mddev;
 
-       mddev->queue = disk->queue;
-       blk_queue_write_cache(mddev->queue, true, true);
+       blk_queue_write_cache(disk->queue, true, true);
        disk->events |= DISK_EVENT_MEDIA_CHANGE;
        mddev->gendisk = disk;
        error = add_disk(disk);
@@ -6124,6 +6123,7 @@ int md_run(struct mddev *mddev)
        }
 
        if (!mddev_is_dm(mddev)) {
+               struct request_queue *q = mddev->gendisk->queue;
                bool nonrot = true;
 
                rdev_for_each(rdev, mddev) {
@@ -6135,14 +6135,14 @@ int md_run(struct mddev *mddev)
                if (mddev->degraded)
                        nonrot = false;
                if (nonrot)
-                       blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
+                       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
                else
-                       blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
-               blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+                       blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+               blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
 
                /* Set the NOWAIT flags if all underlying devices support it */
                if (nowait)
-                       blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
+                       blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
        }
        if (pers->sync_request) {
                if (mddev->kobj.sd &&
@@ -6379,8 +6379,10 @@ static void mddev_detach(struct mddev *mddev)
                mddev->pers->quiesce(mddev, 0);
        }
        md_unregister_thread(mddev, &mddev->thread);
+
+       /* the unplug fn references 'conf' */
        if (!mddev_is_dm(mddev))
-               blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+               blk_sync_queue(mddev->gendisk->queue);
 }
 
 static void __md_stop(struct mddev *mddev)
@@ -7108,7 +7110,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
        if (!bdev_nowait(rdev->bdev)) {
                pr_info("%s: Disabling nowait because %pg does not support nowait\n",
                        mdname(mddev), rdev->bdev);
-               blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
+               blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue);
        }
        /*
         * Kick recovery, maybe this spare has to be added to the
index 5db58d076256d3b5a276f7829bb462b9e9e9561b..dc7d3dc1569934fa0a590210f85bd2db6f3ffe67 100644 (file)
@@ -469,7 +469,6 @@ struct mddev {
        struct timer_list               safemode_timer;
        struct percpu_ref               writes_pending;
        int                             sync_checkers;  /* # of threads checking writes_pending */
-       struct request_queue            *queue; /* for plugging ... */
 
        struct bitmap                   *bitmap; /* the bitmap for the device */
        struct {
@@ -822,7 +821,7 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
 {
        if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
            !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
-               mddev->queue->limits.max_write_zeroes_sectors = 0;
+               mddev->gendisk->queue->limits.max_write_zeroes_sectors = 0;
 }
 
 static inline int mddev_suspend_and_lock(struct mddev *mddev)
@@ -885,7 +884,7 @@ static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
 #define mddev_add_trace_msg(mddev, fmt, args...)                       \
 do {                                                                   \
        if (!mddev_is_dm(mddev))                                        \
-               blk_add_trace_msg((mddev)->queue, fmt, ##args);         \
+               blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \
 } while (0)
 
 #endif /* _MD_MD_H */
index f65aa6ecec0482e0969ff7d158e4bd52656d4af7..c5d4aeb68404c9cbd3f79797672013f965b45bbb 100644 (file)
@@ -389,7 +389,7 @@ static int raid0_set_limits(struct mddev *mddev)
        lim.io_min = mddev->chunk_sectors << 9;
        lim.io_opt = lim.io_min * mddev->raid_disks;
        mddev_stack_rdev_limits(mddev, &lim);
-       return queue_limits_set(mddev->queue, &lim);
+       return queue_limits_set(mddev->gendisk->queue, &lim);
 }
 
 static int raid0_run(struct mddev *mddev)
index 75329ab2dbd8ded8daf9e073bfc9eaa5cc30a110..445e3d3ff9ff7d932461b67d70b7b847ea7f38a0 100644 (file)
@@ -3083,7 +3083,7 @@ static int raid1_set_limits(struct mddev *mddev)
        blk_set_stacking_limits(&lim);
        lim.max_write_zeroes_sectors = 0;
        mddev_stack_rdev_limits(mddev, &lim);
-       return queue_limits_set(mddev->queue, &lim);
+       return queue_limits_set(mddev->gendisk->queue, &lim);
 }
 
 static void raid1_free(struct mddev *mddev, void *priv);
index 692a3bd94100e20465754a5715eaa668e4e707e6..fd960a5b29fe49d7c5e00f00fd9ec4f5d0d6480b 100644 (file)
@@ -4012,7 +4012,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
        lim.io_min = mddev->chunk_sectors << 9;
        lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
        mddev_stack_rdev_limits(mddev, &lim);
-       return queue_limits_set(mddev->queue, &lim);
+       return queue_limits_set(mddev->gendisk->queue, &lim);
 }
 
 static int raid10_run(struct mddev *mddev)
index da4ba736c4f0c942e15fca6c4ce65649e82ffe93..a70cbec12ed01737874659ed2ac3c65987a5dc9f 100644 (file)
@@ -1393,7 +1393,8 @@ int ppl_init_log(struct r5conf *conf)
                ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
                ppl_conf->block_size = 512;
        } else {
-               ppl_conf->block_size = queue_logical_block_size(mddev->queue);
+               ppl_conf->block_size =
+                       queue_logical_block_size(mddev->gendisk->queue);
        }
 
        for (i = 0; i < ppl_conf->count; i++) {
index 8d2e3f9419a7f35f282e5c2d7f4e00ae62e6f2b7..651fc4d603dc59a2b375f60de2de219e0bcc9ee2 100644 (file)
@@ -4279,9 +4279,10 @@ static int handle_stripe_dirtying(struct r5conf *conf,
                        }
                }
                if (rcw && !mddev_is_dm(conf->mddev))
-                       blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
-                                         (unsigned long long)sh->sector,
-                                         rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
+                       blk_add_trace_msg(conf->mddev->gendisk->queue,
+                               "raid5 rcw %llu %d %d %d",
+                               (unsigned long long)sh->sector, rcw, qread,
+                               test_bit(STRIPE_DELAYED, &sh->state));
        }
 
        if (rcw > disks && rmw > disks &&
@@ -5693,7 +5694,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
        release_inactive_stripe_list(conf, cb->temp_inactive_list,
                                     NR_STRIPE_HASH_LOCKS);
        if (!mddev_is_dm(mddev))
-               trace_block_unplug(mddev->queue, cnt, !from_schedule);
+               trace_block_unplug(mddev->gendisk->queue, cnt, !from_schedule);
        kfree(cb);
 }
 
@@ -7073,7 +7074,7 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
        if (!conf)
                err = -ENODEV;
        else if (new != conf->skip_copy) {
-               struct request_queue *q = mddev->queue;
+               struct request_queue *q = mddev->gendisk->queue;
 
                conf->skip_copy = new;
                if (new)
@@ -7731,7 +7732,7 @@ static int raid5_set_limits(struct mddev *mddev)
        /* No restrictions on the number of segments in the request */
        lim.max_segments = USHRT_MAX;
 
-       return queue_limits_set(mddev->queue, &lim);
+       return queue_limits_set(mddev->gendisk->queue, &lim);
 }
 
 static int raid5_run(struct mddev *mddev)