        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
+       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
        struct raid1_plug_cb *plug = NULL;
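
A REQ_WRITE_SAME bio is unusual in shape: bi_size covers the whole target range, but the payload is a single logical block held in one page, which the device replicates across every sector in the range. A sketch of how such a bio is built, loosely following the 3.7-era blkdev_issue_write_same() in block/blk-lib.c (field names assume the pre-bvec_iter struct bio; submit_write_same is illustrative, and a real caller would also set bi_end_io):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void submit_write_same(struct block_device *bdev, sector_t sector,
                              sector_t nr_sects, struct page *page)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);

        bio->bi_sector = sector;
        bio->bi_bdev = bdev;
        bio->bi_vcnt = 1;
        bio->bi_io_vec->bv_page = page;
        bio->bi_io_vec->bv_offset = 0;
        bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
        /* bi_size spans the full range even though only one page is attached */
        bio->bi_size = nr_sects << 9;
        submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
}

md's job in this patch is simply to keep that flag intact as it clones the bio for each mirror.
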
                                   conf->mirrors[i].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
-               mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
+               mbio->bi_rw =
+                       WRITE | do_flush_fua | do_sync | do_discard | do_same;
                mbio->bi_private = r1_bio;
 
                atomic_inc(&r1_bio->remaining);
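
The hunks above are from drivers/md/raid1.c: make_request() captures the REQ_WRITE_SAME bit of the incoming bio next to the existing flush/FUA and discard bits, then ORs it into each cloned per-mirror mbio so the flag survives the fan-out. Condensed into a hypothetical helper (forward_write_flags is not md code, just the pattern):

#include <linux/bio.h>
#include <linux/fs.h>

static void forward_write_flags(struct bio *src, struct bio *clone)
{
        const unsigned long do_sync      = (src->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (src->bi_rw &
                                            (REQ_FLUSH | REQ_FUA));
        const unsigned long do_discard   = (src->bi_rw &
                                            (REQ_DISCARD | REQ_SECURE));
        const unsigned long do_same      = (src->bi_rw & REQ_WRITE_SAME);

        /* every mirror leg sees the same request semantics */
        clone->bi_rw = WRITE | do_flush_fua | do_sync | do_discard | do_same;
}
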
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
+       if (mddev->queue)
+               blk_queue_max_write_same_sectors(mddev->queue,
+                                                mddev->chunk_sectors);
        rdev_for_each(rdev, mddev) {
                if (!mddev->gendisk)
                        continue;
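
The run()-time hunk advertises the capability: blk_queue_max_write_same_sectors() fills in q->limits.max_write_same_sectors, the value upper layers consult before issuing WRITE SAME at all. From the caller's side it might be used like this (stamp_range is illustrative, not part of the patch; the two helpers are the stock 3.7 block-layer API):

#include <linux/blkdev.h>

static int stamp_range(struct block_device *bdev, sector_t sector,
                       sector_t nr_sects, struct page *pattern)
{
        /* a limit left at zero means the queue never advertised support */
        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;

        return blkdev_issue_write_same(bdev, sector, nr_sects,
                                       GFP_KERNEL, pattern);
}

The remaining hunks make the matching changes in drivers/md/raid10.c.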
 
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
+       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
                                                              rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_rw =
+                               WRITE | do_sync | do_fua | do_discard | do_same;
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
                                                   r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_rw =
+                               WRITE | do_sync | do_fua | do_discard | do_same;
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
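
raid10 writes fan out along two paths, one mbio per mirror rdev and, when a device is being replaced, another per replacement; both setups above must carry do_same, since a clone that dropped the flag would hand the lower device a single-page payload as if it were an ordinary write of the whole range. The atomic_inc(&r10_bio->remaining) on each leg is the matching completion accounting, roughly as follows (a hypothetical condensation, not the md source):

#include <linux/atomic.h>
#include <linux/bio.h>

struct fanout {                        /* stand-in for struct r10bio */
        atomic_t remaining;            /* one reference per cloned mbio */
        struct bio *master_bio;        /* the original incoming bio */
};

static void leg_end_io(struct fanout *f, int err)
{
        /* the parent completes only when the last mirror leg finishes */
        if (atomic_dec_and_test(&f->remaining))
                bio_endio(f->master_bio, err);
}
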
        if (mddev->queue) {
                blk_queue_max_discard_sectors(mddev->queue,
                                              mddev->chunk_sectors);
+               blk_queue_max_write_same_sectors(mddev->queue,
+                                                mddev->chunk_sectors);
                blk_queue_io_min(mddev->queue, chunk_size);
                if (conf->geo.raid_disks % conf->geo.near_copies)
                        blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
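
Capping max_write_same_sectors at mddev->chunk_sectors mirrors the discard limit set just above it: consecutive chunks live on different member devices, so a single request is kept from straddling a chunk boundary. The configured limits can be read back through the standard queue-limit fields and accessors (dump_limits is illustrative only):

#include <linux/blkdev.h>

static void dump_limits(struct request_queue *q)
{
        pr_info("write_same max: %u sectors\n",
                q->limits.max_write_same_sectors);
        pr_info("discard max:    %u sectors\n",
                q->limits.max_discard_sectors);
        pr_info("io_min/io_opt:  %u/%u bytes\n",
                queue_io_min(q), queue_io_opt(q));
}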