}
 
 static loff_t
-iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
-               void *data, struct iomap *iomap)
+iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
+               struct iomap_dio *dio, struct iomap *iomap)
 {
-       struct iomap_dio *dio = data;
        unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
        unsigned int fs_block_size = i_blocksize(inode), pad;
        unsigned int align = iov_iter_alignment(dio->submit.iter);
        if ((pos | length | align) & ((1 << blkbits) - 1))
                return -EINVAL;
 
-       switch (iomap->type) {
-       case IOMAP_HOLE:
-               if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
-                       return -EIO;
-               /*FALLTHRU*/
-       case IOMAP_UNWRITTEN:
-               if (!(dio->flags & IOMAP_DIO_WRITE)) {
-                       length = iov_iter_zero(length, dio->submit.iter);
-                       dio->size += length;
-                       return length;
-               }
+       if (iomap->type == IOMAP_UNWRITTEN) {
                dio->flags |= IOMAP_DIO_UNWRITTEN;
                need_zeroout = true;
-               break;
-       case IOMAP_MAPPED:
-               if (iomap->flags & IOMAP_F_SHARED)
-                       dio->flags |= IOMAP_DIO_COW;
-               if (iomap->flags & IOMAP_F_NEW) {
-                       need_zeroout = true;
-               } else {
-                       /*
-                        * Use a FUA write if we need datasync semantics, this
-                        * is a pure data IO that doesn't require any metadata
-                        * updates and the underlying device supports FUA. This
-                        * allows us to avoid cache flushes on IO completion.
-                        */
-                       if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
-                           (dio->flags & IOMAP_DIO_WRITE_FUA) &&
-                           blk_queue_fua(bdev_get_queue(iomap->bdev)))
-                               use_fua = true;
-               }
-               break;
-       default:
-               WARN_ON_ONCE(1);
-               return -EIO;
+       }
+
+       if (iomap->flags & IOMAP_F_SHARED)
+               dio->flags |= IOMAP_DIO_COW;
+
+       if (iomap->flags & IOMAP_F_NEW) {
+               need_zeroout = true;
+       } else {
+               /*
+                * Use a FUA write if we need datasync semantics, this
+                * is a pure data IO that doesn't require any metadata
+                * updates, and the underlying device supports FUA. This
+                * allows us to avoid cache flushes on IO completion.
+                */
+               if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
+                   (dio->flags & IOMAP_DIO_WRITE_FUA) &&
+                   blk_queue_fua(bdev_get_queue(iomap->bdev)))
+                       use_fua = true;
        }
 
        return copied;
 }
 
+static loff_t
+iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
+{
+       length = iov_iter_zero(length, dio->submit.iter);
+       dio->size += length;
+       return length;
+}
+
+static loff_t
+iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
+               void *data, struct iomap *iomap)
+{
+       struct iomap_dio *dio = data;
+
+       switch (iomap->type) {
+       case IOMAP_HOLE:
+               if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
+                       return -EIO;
+               return iomap_dio_hole_actor(length, dio);
+       case IOMAP_UNWRITTEN:
+               if (!(dio->flags & IOMAP_DIO_WRITE))
+                       return iomap_dio_hole_actor(length, dio);
+               return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+       case IOMAP_MAPPED:
+               return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+       default:
+               WARN_ON_ONCE(1);
+               return -EIO;
+       }
+}
+
 /*
  * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
  * is being issued as AIO or not.  This allows us to optimise pure data writes