md: pass in max_sectors for pers->sync_request()
author	Yu Kuai <yukuai3@huawei.com>
	Tue, 11 Jun 2024 13:22:50 +0000 (21:22 +0800)
committer	Song Liu <song@kernel.org>
	Wed, 12 Jun 2024 16:32:57 +0000 (16:32 +0000)
For different sync_action values, sync_thread will use a different
max_sectors; see details in md_sync_max_sectors(). Currently both
md_do_sync() and pers->sync_request() have to compute the same
max_sectors in each iteration. Hence pass max_sectors in to
pers->sync_request() to remove the redundant code.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240611132251.1967786-12-yukuai1@huaweicloud.com
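
A minimal caller-side sketch of the resulting flow, not the verbatim
md_do_sync() code: max_sectors is derived once up front and the same value is
passed to every ->sync_request() call, so the personalities no longer rederive
it from mddev->dev_sectors / mddev->resync_max_sectors. The function name
sync_loop_sketch is hypothetical; md_sync_max_sectors() and enum sync_action
are assumed from earlier patches in this series, and the skip/progress
handling is simplified.

/*
 * Hypothetical illustration only (not the actual md_do_sync() loop):
 * the caller computes max_sectors once and hands it to the personality's
 * ->sync_request() on every iteration, per the new signature in md.h.
 */
static void sync_loop_sketch(struct mddev *mddev, enum sync_action action)
{
	sector_t max_sectors = md_sync_max_sectors(mddev, action);
	sector_t j = 0;		/* starting resync position, simplified */
	int skipped = 0;

	while (j < max_sectors) {
		sector_t sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		skipped = 0;
		sectors = mddev->pers->sync_request(mddev, j, max_sectors,
						    &skipped);
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			break;
		}

		/*
		 * 'skipped' only tells the caller that no real I/O was issued
		 * (used for progress accounting in the real code); the resync
		 * position still advances.
		 */
		j += sectors;
	}

	/* final call at max_sectors, mirroring the second md.c hunk below */
	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
}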
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c

index ec2ef4dd42cf56eb14112662c95abc0537cc6bf0..c0426a6d2fd1971ff9d61491ce138e73028df11d 100644 (file)
@@ -9186,7 +9186,8 @@ void md_do_sync(struct md_thread *thread)
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        break;
 
-               sectors = mddev->pers->sync_request(mddev, j, &skipped);
+               sectors = mddev->pers->sync_request(mddev, j, max_sectors,
+                                                   &skipped);
                if (sectors == 0) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        break;
@@ -9276,7 +9277,7 @@ void md_do_sync(struct md_thread *thread)
                mddev->curr_resync_completed = mddev->curr_resync;
                sysfs_notify_dirent_safe(mddev->sysfs_completed);
        }
-       mddev->pers->sync_request(mddev, max_sectors, &skipped);
+       mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
 
        if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
            mddev->curr_resync > MD_RESYNC_ACTIVE) {
index 41781e41d8ffc2c606af09dc908b2fd936fbfa63..2dc52edec3fe63a77753bb63a3e50699489a6de3 100644 (file)
@@ -729,7 +729,8 @@ struct md_personality
        int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
        int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
        int (*spare_active) (struct mddev *mddev);
-       sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
+       sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
+                                sector_t max_sector, int *skipped);
        int (*resize) (struct mddev *mddev, sector_t sectors);
        sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
        int (*check_reshape) (struct mddev *mddev);
index 3d54f30112a0e8fd82c66a6ba096470a939dfa83..2bbfb4e682b2ff63fd8ecb6a06b7eb3e9df03372 100644 (file)
@@ -2756,12 +2756,12 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
  */
 
 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
-                                  int *skipped)
+                                  sector_t max_sector, int *skipped)
 {
        struct r1conf *conf = mddev->private;
        struct r1bio *r1_bio;
        struct bio *bio;
-       sector_t max_sector, nr_sectors;
+       sector_t nr_sectors;
        int disk = -1;
        int i;
        int wonly = -1;
@@ -2777,7 +2777,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                if (init_resync(conf))
                        return 0;
 
-       max_sector = mddev->dev_sectors;
        if (sector_nr >= max_sector) {
                /* If we aborted, we need to abort the
                 * sync on the 'current' bitmap chunk (there will
index f8d7c02c6ed561163a855bce209e0be2dfcc650e..4e804602d1e53a9e819eaf296999fe6b297e20e1 100644 (file)
@@ -3139,12 +3139,12 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf)
  */
 
 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
-                            int *skipped)
+                                   sector_t max_sector, int *skipped)
 {
        struct r10conf *conf = mddev->private;
        struct r10bio *r10_bio;
        struct bio *biolist = NULL, *bio;
-       sector_t max_sector, nr_sectors;
+       sector_t nr_sectors;
        int i;
        int max_sync;
        sector_t sync_blocks;
@@ -3174,10 +3174,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                        return 0;
 
  skipped:
-       max_sector = mddev->dev_sectors;
-       if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
-           test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-               max_sector = mddev->resync_max_sectors;
        if (sector_nr >= max_sector) {
                conf->cluster_sync_low = 0;
                conf->cluster_sync_high = 0;
index a84389311dd1eaba53a5ed92ffbef6db318c3638..013adc5ba0e124c2e2718b80ea4a1ed27edde838 100644 (file)
@@ -6457,11 +6457,10 @@ ret:
 }
 
 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
-                                         int *skipped)
+                                         sector_t max_sector, int *skipped)
 {
        struct r5conf *conf = mddev->private;
        struct stripe_head *sh;
-       sector_t max_sector = mddev->dev_sectors;
        sector_t sync_blocks;
        int still_degraded = 0;
        int i;