struct raid1_plug_cb *plug = NULL;
        int first_clone;
        int max_sectors;
+       bool write_behind = false;
 
        if (mddev_is_clustered(mddev) &&
             md_cluster_ops->area_resyncing(mddev, WRITE,
        max_sectors = r1_bio->sectors;
        for (i = 0;  i < disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+
+               /*
+                * Write-behind io is only attempted on drives marked as
+                * write-mostly, which means we could allocate a write-behind
+                * bio later.
+                */
+               if (rdev && test_bit(WriteMostly, &rdev->flags))
+                       write_behind = true;
+
                if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
                        atomic_inc(&rdev->nr_pending);
                        blocked_rdev = rdev;
                goto retry_write;
        }
 
+       /*
+        * When using a bitmap, we may call alloc_behind_master_bio below.
+        * alloc_behind_master_bio allocates a copy of the data payload a page
+        * at a time and thus needs a new bio that can fit the whole payload
+        * of this bio in page sized chunks.
+        */
+       if (write_behind && bitmap)
+               max_sectors = min_t(int, max_sectors,
+                                   BIO_MAX_VECS * (PAGE_SIZE >> 9));
+
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, &conf->bio_split);
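
For reference, a standalone sketch of the arithmetic behind the new cap. It is not part of the patch: it assumes 512-byte sectors, 4 KiB pages and BIO_MAX_VECS == 256 (the value in current kernels), so the concrete numbers are assumptions, not guarantees.

#include <stdio.h>

#define SECTOR_SHIFT    9       /* 512-byte sectors */
#define PAGE_SIZE       4096UL  /* assumed page size */
#define BIO_MAX_VECS    256     /* assumed per-bio bvec limit */

int main(void)
{
        /* sectors that fit in one page: 4096 >> 9 == 8 */
        unsigned long sectors_per_page = PAGE_SIZE >> SECTOR_SHIFT;

        /*
         * alloc_behind_master_bio() copies the payload one page per bvec,
         * so a write-behind bio can hold at most BIO_MAX_VECS pages:
         * 256 * 8 = 2048 sectors, i.e. 1 MiB per (possibly split) bio.
         */
        unsigned long cap = BIO_MAX_VECS * sectors_per_page;

        printf("write-behind cap: %lu sectors (%lu KiB)\n",
               cap, (cap << SECTOR_SHIFT) / 1024);
        return 0;
}

Writes larger than the cap are trimmed to max_sectors by the bio_split() call above and the remainder is resubmitted, so only the per-bio payload that alloc_behind_master_bio() has to copy is bounded.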