vblk->disk->driverfs_dev = &vdev->dev;
        index++;
  
 -      /* If barriers are supported, tell block layer that queue is ordered */
 -      if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
 +      if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) {
 +              /*
 +               * If the FLUSH feature is supported we do have support for
 +               * flushing a volatile write cache on the host.  Use that
 +               * to implement write barrier support.
 +               */
-               blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
-                                 virtblk_prepare_flush);
+               blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
 -      else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
 +      } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
 +              /*
 +               * If the BARRIER feature is supported the host expects us
 +               * to order requests by tags.  This implies there is no
 +               * volatile write cache on the host, and that the host
 +               * never re-orders outstanding I/O.  This feature is not
 +               * useful for real life scenarios and is deprecated.
 +               */
-               blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
+               blk_queue_ordered(q, QUEUE_ORDERED_TAG);
 +      } else {
 +              /*
 +               * If the FLUSH feature is not supported we must assume that
 +               * the host does not perform any kind of volatile write
 +               * caching. We still need to drain the queue to provide
 +               * proper barrier semantics.
 +               */
-               blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
++              blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
 +      }
  
        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
 
                }
  
                if (uptodate == 0 && rq->bio)
 -                      ide_cd_error_cmd(drive, cmd);
 +                      if (ide_cd_error_cmd(drive, cmd))
 +                              return ide_stopped;
  
                /* make sure it's fully ended */
-               if (blk_fs_request(rq) == 0) {
+               if (rq->cmd_type != REQ_TYPE_FS) {
                        rq->resid_len -= cmd->nbytes - cmd->nleft;
                        if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
                                rq->resid_len += cmd->last_xfer_len;
 
                    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
                        return FAILED;
  
 -              return NEEDS_RETRY;
 +              if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
 +                      scmd_printk(KERN_WARNING, scmd,
 +                                  "Warning! Received an indication that the "
 +                                  "LUN assignments on this target have "
 +                                  "changed. The Linux SCSI layer does not "
 +                                  "automatically remap LUN assignments.\n");
 +              else if (sshdr.asc == 0x3f)
 +                      scmd_printk(KERN_WARNING, scmd,
 +                                  "Warning! Received an indication that the "
 +                                  "operating parameters on this target have "
 +                                  "changed. The Linux SCSI layer does not "
 +                                  "automatically adjust these parameters.\n");
 +
-               if (blk_barrier_rq(scmd->request))
++              if (scmd->request->cmd_flags & REQ_HARDBARRIER)
 +                      /*
 +                       * barrier requests should always retry on UA
 +                       * otherwise block will get a spurious error
 +                       */
 +                      return NEEDS_RETRY;
 +              else
 +                      /*
 +                       * for normal (non barrier) commands, pass the
 +                       * UA upwards for a determination in the
 +                       * completion functions
 +                       */
 +                      return SUCCESS;
 +
                /* these three are not supported */
        case COPY_ABORTED:
        case VOLUME_OVERFLOW:
 
  {
        struct scsi_disk *sdkp;
  
 -      async_synchronize_full();
        sdkp = dev_get_drvdata(dev);
 +      scsi_autopm_get_device(sdkp->device);
 +
 +      async_synchronize_full();
        blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
+       blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
        device_del(&sdkp->dev);
        del_gendisk(sdkp->disk);
        sd_shutdown(dev);
 
  #endif
  
  #ifdef CONFIG_BLOCK
- struct bio;
  typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
                            loff_t file_offset);
 -void dio_end_io(struct bio *bio, int error);
 -
 -ssize_t __blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
 -      struct block_device *bdev, const struct iovec *iov, loff_t offset,
 -      unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
 -      dio_submit_t submit_io, int lock_type);
 -ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 -      struct block_device *bdev, const struct iovec *iov, loff_t offset,
 -      unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
 -      dio_submit_t submit_io, int lock_type);
  
  enum {
        /* need locking between buffered and direct access */