                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }
  
-                       dio->iocb->ki_complete(iocb, ret, 0);
+                       dio->iocb->ki_complete(iocb, ret);
 -                      if (dio->flags & DIO_MULTI_BIO)
 -                              bio_put(&dio->bio);
 +                      bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;
  
        return ret;
  }
  
-       iocb->ki_complete(iocb, ret, 0);
 +static void blkdev_bio_end_io_async(struct bio *bio)
 +{
 +      struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
 +      struct kiocb *iocb = dio->iocb;
 +      ssize_t ret;
 +
 +      if (likely(!bio->bi_status)) {
 +              ret = dio->size;
 +              iocb->ki_pos += ret;
 +      } else {
 +              ret = blk_status_to_errno(bio->bi_status);
 +      }
 +
++      iocb->ki_complete(iocb, ret);
 +
 +      if (dio->flags & DIO_SHOULD_DIRTY) {
 +              bio_check_pages_dirty(bio);
 +      } else {
 +              bio_release_pages(bio, false);
 +              bio_put(bio);
 +      }
 +}
 +
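
The completion handler above receives only a bio pointer but needs the per-request state, so it upcasts with container_of(): bio_alloc_kiocb() hands out bios that are embedded as the final member of a struct blkdev_dio allocated from blkdev_dio_pool, so one allocation serves both structures and this fast path needs neither a separate dio allocation nor the refcounting the multi-bio path uses. For readers unfamiliar with the idiom, a minimal self-contained sketch of the same embed-and-upcast pattern follows; the fake_* names are illustrative stand-ins, not kernel types:

#include <stddef.h>	/* offsetof */
#include <stdio.h>

/* Illustrative stand-ins, not the kernel's types. */
struct fake_bio {
	int status;
};

struct fake_dio {
	long size;
	struct fake_bio bio;	/* embedded, like blkdev_dio::bio */
};

/* Same trick as the kernel's container_of(): step back from a
 * pointer to a member to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void fake_end_io(struct fake_bio *bio)
{
	struct fake_dio *dio = container_of(bio, struct fake_dio, bio);

	printf("completed, dio->size = %ld\n", dio->size);
}

int main(void)
{
	struct fake_dio dio = { .size = 4096 };

	/* The completion callback is handed only the embedded bio. */
	fake_end_io(&dio.bio);
	return 0;
}
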
 +static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 +                                      struct iov_iter *iter,
 +                                      unsigned int nr_pages)
 +{
 +      struct block_device *bdev = iocb->ki_filp->private_data;
 +      struct blkdev_dio *dio;
 +      struct bio *bio;
 +      loff_t pos = iocb->ki_pos;
 +      int ret = 0;
 +
 +      if ((pos | iov_iter_alignment(iter)) &
 +          (bdev_logical_block_size(bdev) - 1))
 +              return -EINVAL;
 +
 +      bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
 +      dio = container_of(bio, struct blkdev_dio, bio);
 +      dio->flags = 0;
 +      dio->iocb = iocb;
 +      bio_set_dev(bio, bdev);
 +      bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
 +      bio->bi_write_hint = iocb->ki_hint;
 +      bio->bi_end_io = blkdev_bio_end_io_async;
 +      bio->bi_ioprio = iocb->ki_ioprio;
 +
 +      if (iov_iter_is_bvec(iter)) {
 +              /*
 +               * Users don't rely on the iterator being in any particular
 +               * state for async I/O returning -EIOCBQUEUED, hence we can
 +               * avoid expensive iov_iter_advance(). Bypass
 +               * bio_iov_iter_get_pages() and set the bvec directly.
 +               */
 +              bio_iov_bvec_set(bio, iter);
 +      } else {
 +              ret = bio_iov_iter_get_pages(bio, iter);
 +              if (unlikely(ret)) {
 +                      bio->bi_status = BLK_STS_IOERR;
 +                      bio_endio(bio);
 +                      return ret;
 +              }
 +      }
 +      dio->size = bio->bi_iter.bi_size;
 +
 +      if (iov_iter_rw(iter) == READ) {
 +              bio->bi_opf = REQ_OP_READ;
 +              if (iter_is_iovec(iter)) {
 +                      dio->flags |= DIO_SHOULD_DIRTY;
 +                      bio_set_pages_dirty(bio);
 +              }
 +      } else {
 +              bio->bi_opf = dio_bio_write_op(iocb);
 +              task_io_account_write(bio->bi_iter.bi_size);
 +      }
 +
 +      if (iocb->ki_flags & IOCB_HIPRI) {
 +              bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
 +              submit_bio(bio);
 +              WRITE_ONCE(iocb->private, bio);
 +      } else {
 +              if (iocb->ki_flags & IOCB_NOWAIT)
 +                      bio->bi_opf |= REQ_NOWAIT;
 +              submit_bio(bio);
 +      }
 +      return -EIOCBQUEUED;
 +}
 +
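
Note the early -EINVAL in the helper: both the file position and every segment of the iterator must be aligned to the device's logical block size, and (pos | iov_iter_alignment(iter)) & (lbs - 1) folds both checks into one test. Once the bio is submitted the helper always returns -EIOCBQUEUED and the result is reported through iocb->ki_complete(); for IOCB_HIPRI the bio is additionally stashed in iocb->private with WRITE_ONCE() so the polling side can find it. A hedged userspace sketch of the alignment contract, using the standard BLKSSZGET ioctl (the device path is only an example, and this goes through the ordinary synchronous read path rather than this specific helper):

#define _GNU_SOURCE		/* O_DIRECT */
#include <fcntl.h>
#include <linux/fs.h>		/* BLKSSZGET */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int lbs;
	int fd = open("/dev/sda", O_RDONLY | O_DIRECT);	/* example device */

	if (fd < 0 || ioctl(fd, BLKSSZGET, &lbs) != 0) {
		perror("open/BLKSSZGET");
		return 1;
	}

	/* Buffer, length and offset are all aligned to the logical
	 * block size; a misaligned buffer or offset would instead
	 * fail with -EINVAL. */
	if (posix_memalign(&buf, lbs, lbs))
		return 1;

	if (pread(fd, buf, lbs, 0) < 0)
		perror("pread");

	free(buf);
	close(fd);
	return 0;
}
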
  static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
  {
        unsigned int nr_pages;