md/raid5: only add to wq if reshape is in progress
author     Artur Paszkiewicz <artur.paszkiewicz@intel.com>
           Tue, 27 Aug 2024 15:35:35 +0000 (17:35 +0200)
committer  Song Liu <song@kernel.org>
           Thu, 29 Aug 2024 16:37:10 +0000 (09:37 -0700)
Now that actual overlaps are no longer handled on the wait_for_overlap
wq, the remaining cases in which we wait on this wq are limited to
reshape. If reshape is not in progress, don't add to the wq in
raid5_make_request(), because the add_wait_queue() /
remove_wait_queue() operations take a spinlock and cause noticeable
contention when multiple threads are submitting requests to the mddev.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Link: https://lore.kernel.org/r/20240827153536.6743-3-artur.paszkiewicz@intel.com
Signed-off-by: Song Liu <song@kernel.org>
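
As an aside for readers of this commit, the pattern can be sketched in
plain C. The snippet below is a hypothetical userspace analogue, using a
pthread mutex in place of the wait queue's internal spinlock; none of the
names (fake_conf, nr_waiters, submit_request) come from the kernel
source. It only illustrates why the change helps: the non-reshape fast
path never touches the registration lock at all, so concurrent
submitters stop contending on it.

/*
 * Hypothetical userspace sketch, not kernel code: a pthread mutex
 * stands in for the wait queue's internal spinlock, and nr_waiters
 * stands in for the linked wait entries.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_conf {
        atomic_bool reshape_in_progress; /* ~ conf->reshape_progress != MaxSector */
        pthread_mutex_t wq_lock;         /* ~ the wait queue's spinlock */
        int nr_waiters;                  /* protected by wq_lock */
};

static void submit_request(struct fake_conf *conf)
{
        bool on_wq = false;

        /*
         * Analogue of add_wait_queue(): the lock is held only briefly,
         * but the fast path now skips even that, which is the point of
         * the patch.
         */
        if (atomic_load(&conf->reshape_in_progress)) {
                pthread_mutex_lock(&conf->wq_lock);
                conf->nr_waiters++;
                pthread_mutex_unlock(&conf->wq_lock);
                on_wq = true;
        }

        /* ... issue stripe requests; only the reshape path ever sleeps ... */

        /* Analogue of remove_wait_queue(): same lock, taken once more. */
        if (on_wq) {
                pthread_mutex_lock(&conf->wq_lock);
                conf->nr_waiters--;
                pthread_mutex_unlock(&conf->wq_lock);
        }
}

int main(void)
{
        struct fake_conf conf = { .wq_lock = PTHREAD_MUTEX_INITIALIZER };

        atomic_init(&conf.reshape_in_progress, false);
        submit_request(&conf); /* fast path: no lock traffic at all */

        atomic_store(&conf.reshape_in_progress, true);
        submit_request(&conf); /* slow path: pays for the registration lock */

        puts("done");
        return 0;
}

In the kernel, the corresponding lock is taken inside add_wait_queue()
and remove_wait_queue(); even though it is held only briefly, every
raid5_make_request() call previously paid for it, which is what showed
up as contention under multi-threaded submission.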
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index de7d9959b3dc5b8e9d4030cf5162b591a52b44c9..e1ddfb6d8b375cabf0723890479c9cec46f006c8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6070,6 +6070,7 @@ static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
 static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       bool on_wq;
        struct r5conf *conf = mddev->private;
        sector_t logical_sector;
        struct stripe_request_ctx ctx = {};
@@ -6143,11 +6144,15 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
         * sequential IO pattern. We don't bother with the optimization when
         * reshaping as the performance benefit is not worth the complexity.
         */
-       if (likely(conf->reshape_progress == MaxSector))
+       if (likely(conf->reshape_progress == MaxSector)) {
                logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
+               on_wq = false;
+       } else {
+               add_wait_queue(&conf->wait_for_overlap, &wait);
+               on_wq = true;
+       }
        s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
 
-       add_wait_queue(&conf->wait_for_overlap, &wait);
        while (1) {
                res = make_stripe_request(mddev, conf, &ctx, logical_sector,
                                          bi);
@@ -6158,6 +6163,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
                        continue;
 
                if (res == STRIPE_SCHEDULE_AND_RETRY) {
+                       WARN_ON_ONCE(!on_wq);
                        /*
                         * Must release the reference to batch_last before
                         * scheduling and waiting for work to be done,
@@ -6182,7 +6188,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
                logical_sector = ctx.first_sector +
                        (s << RAID5_STRIPE_SHIFT(conf));
        }
-       remove_wait_queue(&conf->wait_for_overlap, &wait);
+       if (unlikely(on_wq))
+               remove_wait_queue(&conf->wait_for_overlap, &wait);
 
        if (ctx.batch_last)
                raid5_release_stripe(ctx.batch_last);
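
A note on the added WARN_ON_ONCE(!on_wq): under the commit's premise,
STRIPE_SCHEDULE_AND_RETRY is expected only while a reshape is in
progress, and the retry path goes on to wait for a wakeup on the wq. If
it were ever reached without the registration done above, that wait
would have no guaranteed wakeup, so the warning turns a silent hang risk
into a visible report.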