        const enum req_op op = bio_op(bio);
        const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
        const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
+       const blk_opf_t do_atomic = bio->bi_opf & REQ_ATOMIC;
        unsigned long flags;
        struct r10conf *conf = mddev->private;
        struct md_rdev *rdev;
@@ ... @@
        mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
                                   choose_data_offset(r10_bio, rdev));
        mbio->bi_end_io = raid10_end_write_request;
-       mbio->bi_opf = op | do_sync | do_fua;
+       mbio->bi_opf = op | do_sync | do_fua | do_atomic;
        if (!replacement && test_bit(FailFast,
                                     &conf->mirrors[devnum].rdev->flags)
                         && enough(conf, devnum))
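
Taken together, the two hunks above treat REQ_ATOMIC exactly like the existing REQ_SYNC and REQ_FUA handling: the bit is masked out of the parent bio once, then OR'ed back into each per-mirror bio, so every member device receives the same atomic write. A standalone illustration of that masking idiom (plain userspace C, not kernel code; the DEMO_* flag values are made up for the demo):

#include <assert.h>
#include <stdint.h>

/* Stand-ins for REQ_SYNC / REQ_FUA / REQ_ATOMIC; values are illustrative. */
#define DEMO_SYNC   (1u << 0)
#define DEMO_FUA    (1u << 1)
#define DEMO_ATOMIC (1u << 2)

int main(void)
{
        uint32_t parent_opf = DEMO_SYNC | DEMO_ATOMIC;  /* FUA not set */

        /* Mask each flag of interest out of the parent once... */
        uint32_t do_sync   = parent_opf & DEMO_SYNC;
        uint32_t do_fua    = parent_opf & DEMO_FUA;     /* 0: clear bit propagates as clear */
        uint32_t do_atomic = parent_opf & DEMO_ATOMIC;

        /* ...then recombine for the clone: set bits carry over unchanged. */
        uint32_t mirror_opf = do_sync | do_fua | do_atomic;

        assert(mirror_opf == (DEMO_SYNC | DEMO_ATOMIC));
        return 0;
}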
@@ ... @@
                                continue;
                        }
                        if (is_bad) {
-                               int good_sectors = first_bad - dev_sector;
+                               int good_sectors;
+
+                               /*
+                                * We cannot atomically write this, so just
+                                * error in that case. It could be possible to
+                                * atomically write other mirrors, but the
+                                * complexity of supporting that is not worth
+                                * the benefit.
+                                */
+                               if (bio->bi_opf & REQ_ATOMIC) {
+                                       error = -EIO;
+                                       goto err_handle;
+                               }
+
+                               good_sectors = first_bad - dev_sector;
                                if (good_sectors < max_sectors)
                                        max_sectors = good_sectors;
                        }
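
The in-code comment spells out the trade-off: instead of shrinking the write to good_sectors (the non-atomic path) or atomically writing only the healthy mirrors, a REQ_ATOMIC bio that overlaps a bad block fails as a whole, which userspace observes as EIO. A hedged caller-side sketch, assuming a file descriptor opened on the md device and an iovec sized within the advertised atomic-write limits (write_atomic_or_fallback is a hypothetical helper name):

#define _GNU_SOURCE
#include <errno.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef RWF_ATOMIC
#define RWF_ATOMIC 0x00000040   /* from linux uapi; needs kernel support */
#endif

/* Try an atomic write first; on EIO (e.g. a bad block on one mirror makes
 * the range unwritable atomically) retry as a plain write. */
static ssize_t write_atomic_or_fallback(int fd, const struct iovec *iov,
                                        off_t off)
{
        ssize_t ret = pwritev2(fd, iov, 1, off, RWF_ATOMIC);

        if (ret < 0 && errno == EIO)
                ret = pwritev2(fd, iov, 1, off, 0);     /* non-atomic retry */
        return ret;
}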
@@ ... @@
        lim.max_write_zeroes_sectors = 0;
        lim.io_min = mddev->chunk_sectors << 9;
        lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+       lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
        err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
        if (err) {
                queue_limits_cancel_update(mddev->gendisk->queue);
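
With BLK_FEAT_ATOMIC_WRITES_STACKED set before mddev_stack_rdev_limits() runs, the block layer's limit stacking can combine the member devices' atomic-write limits into the array's queue limits, which are then visible under the device's queue directory in sysfs. A quick userspace check (a sketch; "md0" is an assumed array name, and a value of 0 means atomic writes are not supported):

#include <stdio.h>

int main(void)
{
        /* Attribute name comes from the block layer's atomic write support. */
        const char *path =
                "/sys/block/md0/queue/atomic_write_unit_max_bytes";
        char buf[32];
        FILE *f = fopen(path, "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("%s: %s", path, buf);
        fclose(f);
        return 0;
}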