submit_bio_noacct(read_bio);
 }
 
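+/*
+ * Wait for every Blocked rdev, and for any rdev with an unacknowledged bad
+ * block overlapping @bio, to become unblocked.  Return false instead of
+ * waiting if the bio is REQ_NOWAIT; otherwise return true once no rdev is
+ * blocked.
+ */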
+static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
+{
+       struct r1conf *conf = mddev->private;
+       int disks = conf->raid_disks * 2;
+       int i;
+
+retry:
+       for (i = 0; i < disks; i++) {
+               struct md_rdev *rdev = conf->mirrors[i].rdev;
+
+               if (!rdev)
+                       continue;
+
+               if (test_bit(Blocked, &rdev->flags)) {
+                       if (bio->bi_opf & REQ_NOWAIT)
+                               return false;
+
+                       mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
+                                           rdev->raid_disk);
+                       atomic_inc(&rdev->nr_pending);
+                       md_wait_for_blocked_rdev(rdev, rdev->mddev);
+                       goto retry;
+               }
+
+               /* don't write here until the bad block is acknowledged */
+               if (test_bit(WriteErrorSeen, &rdev->flags) &&
+                   rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
+                                     bio_sectors(bio)) < 0) {
+                       if (bio->bi_opf & REQ_NOWAIT)
+                               return false;
+
+                       set_bit(BlockedBadBlocks, &rdev->flags);
+                       mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
+                                           rdev->raid_disk);
+                       atomic_inc(&rdev->nr_pending);
+                       md_wait_for_blocked_rdev(rdev, rdev->mddev);
+                       goto retry;
+               }
+       }
+
+       return true;
+}
+
 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                                int max_write_sectors)
 {
        struct r1bio *r1_bio;
        int i, disks;
        unsigned long flags;
-       struct md_rdev *blocked_rdev;
        int first_clone;
        int max_sectors;
        bool write_behind = false;
                return;
        }
 
- retry_write:
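+       /*
+        * Fail REQ_NOWAIT bios with BLK_STS_AGAIN instead of waiting for a
+        * blocked rdev.
+        */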
+       if (!wait_blocked_rdev(mddev, bio)) {
+               bio_wouldblock_error(bio);
+               return;
+       }
+
        r1_bio = alloc_r1bio(mddev, bio);
        r1_bio->sectors = max_write_sectors;
 
         */
 
        disks = conf->raid_disks * 2;
-       blocked_rdev = NULL;
        max_sectors = r1_bio->sectors;
        for (i = 0;  i < disks; i++) {
                struct md_rdev *rdev = conf->mirrors[i].rdev;
                if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags))
                        write_behind = true;
 
-               if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
-                       atomic_inc(&rdev->nr_pending);
-                       blocked_rdev = rdev;
-                       break;
-               }
                r1_bio->bios[i] = NULL;
                if (!rdev || test_bit(Faulty, &rdev->flags)) {
                        if (i < conf->raid_disks)
 
                        is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
                                             &first_bad, &bad_sectors);
-                       if (is_bad < 0) {
-                               /* mustn't write here until the bad block is
-                                * acknowledged*/
-                               set_bit(BlockedBadBlocks, &rdev->flags);
-                               blocked_rdev = rdev;
-                               break;
-                       }
                        if (is_bad && first_bad <= r1_bio->sector) {
                                /* Cannot write here at all */
                                bad_sectors -= (r1_bio->sector - first_bad);
                r1_bio->bios[i] = bio;
        }
 
-       if (unlikely(blocked_rdev)) {
-               /* Wait for this device to become unblocked */
-               int j;
-
-               for (j = 0; j < i; j++)
-                       if (r1_bio->bios[j])
-                               rdev_dec_pending(conf->mirrors[j].rdev, mddev);
-               mempool_free(r1_bio, &conf->r1bio_pool);
-               allow_barrier(conf, bio->bi_iter.bi_sector);
-
-               if (bio->bi_opf & REQ_NOWAIT) {
-                       bio_wouldblock_error(bio);
-                       return;
-               }
-               mddev_add_trace_msg(mddev, "raid1 wait rdev %d blocked",
-                               blocked_rdev->raid_disk);
-               md_wait_for_blocked_rdev(blocked_rdev, mddev);
-               wait_barrier(conf, bio->bi_iter.bi_sector, false);
-               goto retry_write;
-       }
-
        /*
         * When using a bitmap, we may call alloc_behind_master_bio below.
         * alloc_behind_master_bio allocates a copy of the data payload a page