dio->submit.cookie = submit_bio(bio);
 }
 
-static ssize_t iomap_dio_complete(struct iomap_dio *dio)
+ssize_t iomap_dio_complete(struct iomap_dio *dio)
 {
        const struct iomap_dio_ops *dops = dio->dops;
        struct kiocb *iocb = dio->iocb;
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(iomap_dio_complete);
 
 static void iomap_dio_complete_work(struct work_struct *work)
 {
- * Returns -ENOTBLK In case of a page invalidation invalidation failure for
- * writes.  The callers needs to fall back to buffered I/O in this case.
+ * Returns -ENOTBLK in case of a page invalidation failure for writes.
+ * The caller needs to fall back to buffered I/O in this case.
  */
-ssize_t
-iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+struct iomap_dio *
+__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
                bool wait_for_completion)
 {
        struct iomap_dio *dio;
 
        if (!count)
-               return 0;
+               return NULL;
 
        if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
-               return -EIO;
+               return ERR_PTR(-EIO);
 
        dio = kmalloc(sizeof(*dio), GFP_KERNEL);
        if (!dio)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        dio->iocb = iocb;
        atomic_set(&dio->ref, 1);
        dio->wait_for_completion = wait_for_completion;
        if (!atomic_dec_and_test(&dio->ref)) {
                if (!wait_for_completion)
-                       return -EIOCBQUEUED;
+                       return ERR_PTR(-EIOCBQUEUED);
 
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                __set_current_state(TASK_RUNNING);
        }
 
-       return iomap_dio_complete(dio);
+       return dio;
 
 out_free_dio:
        kfree(dio);
-       return ret;
+       if (ret)
+               return ERR_PTR(ret);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(__iomap_dio_rw);
+
+ssize_t
+iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+               const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+               bool wait_for_completion)
+{
+       struct iomap_dio *dio;
+
+       dio = __iomap_dio_rw(iocb, iter, ops, dops, wait_for_completion);
+       if (IS_ERR_OR_NULL(dio))
+               return PTR_ERR_OR_ZERO(dio);
+       return iomap_dio_complete(dio);
 }
 EXPORT_SYMBOL_GPL(iomap_dio_rw);
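
The split above is what lets a filesystem drop its own locks between submission and completion. Below is a minimal caller sketch under assumed names: myfs_dio_read(), myfs_iomap_ops, and myfs_dio_ops are hypothetical, and only __iomap_dio_rw(), iomap_dio_complete(), and their return conventions come from this patch.

static ssize_t myfs_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_dio *dio;

	inode_lock_shared(inode);
	dio = __iomap_dio_rw(iocb, to, &myfs_iomap_ops, &myfs_dio_ops,
			is_sync_kiocb(iocb));
	/* The submission-side lock can be dropped before completion. */
	inode_unlock_shared(inode);

	/*
	 * NULL means a zero-length request (0 bytes transferred);
	 * ERR_PTR(-EIOCBQUEUED) means an async dio was queued, in which
	 * case iomap_dio_complete() runs later from
	 * iomap_dio_complete_work() rather than here.
	 */
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}

For synchronous I/O, __iomap_dio_rw() hands back the dio itself, so the caller chooses where iomap_dio_complete() runs; that is the point of exporting both halves.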
 
 struct address_space;
 struct fiemap_extent_info;
 struct inode;
+struct iomap_dio;
 struct iomap_writepage_ctx;
 struct iov_iter;
 struct kiocb;
 ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
                bool wait_for_completion);
+struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+               const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+               bool wait_for_completion);
+ssize_t iomap_dio_complete(struct iomap_dio *dio);
 int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
 
 #ifdef CONFIG_SWAP
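
For reference, the dops argument threaded through both prototypes is the per-filesystem completion hook table. A minimal sketch of such an instance follows; myfs_dio_end_io() is hypothetical, while the ->end_io signature is the one struct iomap_dio_ops already declares.

static int myfs_dio_end_io(struct kiocb *iocb, ssize_t size, int error,
		unsigned int flags)
{
	/*
	 * Per-filesystem completion work (e.g. unwritten extent
	 * conversion) goes here; the return value is propagated into
	 * the result of iomap_dio_complete().
	 */
	return error;
}

static const struct iomap_dio_ops myfs_dio_ops = {
	.end_io	= myfs_dio_end_io,
};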