return 0;
 }
 
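+/*
+ * Allocate an r1bio from the resync mempool and reset each of its bios
+ * for reuse.  bio_reset() clears bi_private, which links the bio to its
+ * resync_pages, so that pointer is saved and restored across the reset.
+ */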
+static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
+{
+       struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+       struct resync_pages *rps;
+       struct bio *bio;
+       int i;
+
+       for (i = conf->poolinfo->raid_disks; i--; ) {
+               bio = r1bio->bios[i];
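+               /* save the resync_pages link that bio_reset() clears */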
+               rps = bio->bi_private;
+               bio_reset(bio);
+               bio->bi_private = rps;
+       }
+       r1bio->master_bio = NULL;
+       return r1bio;
+}
+
 /*
  * perform a "sync" on one "block"
  *
 
        bitmap_cond_end_sync(mddev->bitmap, sector_nr,
                mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
-       r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+       r1_bio = raid1_alloc_init_r1buf(conf);
 
        raise_barrier(conf, sector_nr);
 
 
        return 0;
 }
 
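+/*
+ * Allocate an r10bio from the resync mempool and reset its bios, again
+ * preserving the resync_pages pointer stashed in bi_private.
+ */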
+static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
+{
+       struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+       struct resync_pages *rp;
+       struct bio *bio;
+       int nalloc;
+       int i;
+
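+       /*
+        * A resync or reshape uses a bio per copy; a recovery only needs
+        * one bio for the source and one for the destination.
+        */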
+       if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
+           test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
+               nalloc = conf->copies; /* resync */
+       else
+               nalloc = 2; /* recovery */
+
+       for (i = 0; i < nalloc; i++) {
+               bio = r10bio->devs[i].bio;
+               rp = bio->bi_private;
+               bio_reset(bio);
+               bio->bi_private = rp;
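+               /* reset the replacement bio the same way, if one exists */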
+               bio = r10bio->devs[i].repl_bio;
+               if (bio) {
+                       rp = bio->bi_private;
+                       bio_reset(bio);
+                       bio->bi_private = rp;
+               }
+       }
+       return r10bio;
+}
+
 /*
  * perform a "sync" on one "block"
  *
                                atomic_inc(&mreplace->nr_pending);
                        rcu_read_unlock();
 
-                       r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                       r10_bio = raid10_alloc_init_r10buf(conf);
                        r10_bio->state = 0;
                        raise_barrier(conf, rb2 != NULL);
                        atomic_set(&r10_bio->remaining, 0);
                }
                if (sync_blocks < max_sync)
                        max_sync = sync_blocks;
-               r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+               r10_bio = raid10_alloc_init_r10buf(conf);
                r10_bio->state = 0;
 
                r10_bio->mddev = mddev;
 
 read_more:
        /* Now schedule reads for blocks from sector_nr to last */
-       r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+       r10_bio = raid10_alloc_init_r10buf(conf);
        r10_bio->state = 0;
        raise_barrier(conf, sectors_done != 0);
        atomic_set(&r10_bio->remaining, 0);