#include "blk.h"
 
-/**
- * blk_queue_ordered - does this queue support ordered writes
- * @q:        the request queue
- * @ordered:  one of QUEUE_ORDERED_*
- *
- * Description:
- *   For journalled file systems, doing ordered writes on a commit
- *   block instead of explicitly doing wait_on_buffer (which is bad
- *   for performance) can be a big win. Block drivers supporting this
- *   feature should call this function and indicate so.
- *
- **/
-int blk_queue_ordered(struct request_queue *q, unsigned ordered)
-{
-       if (ordered != QUEUE_ORDERED_NONE &&
-           ordered != QUEUE_ORDERED_DRAIN &&
-           ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
-           ordered != QUEUE_ORDERED_DRAIN_FUA) {
-               printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
-               return -EINVAL;
-       }
-
-       q->ordered = ordered;
-       q->next_ordered = ordered;
-
-       return 0;
-}
-EXPORT_SYMBOL(blk_queue_ordered);
-
 /*
  * Cache flushing for ordered writes handling
  */
 
        const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
-       if ((bio->bi_rw & REQ_HARDBARRIER) &&
-           (q->next_ordered == QUEUE_ORDERED_NONE)) {
+       /* REQ_HARDBARRIER is no more */
+       if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
+               "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }
+
        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
 
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
+/**
+ * blk_queue_flush - configure queue's cache flush capability
+ * @q:         the request queue for the device
+ * @flush:     0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
+ *
+ * Tell the block layer about the cache flush capability of @q.  If it
+ * supports flushing, REQ_FLUSH should be set.  If it supports bypassing
+ * the write cache for individual writes, REQ_FUA should be set.
+ */
+void blk_queue_flush(struct request_queue *q, unsigned int flush)
+{
+       WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
+
+       if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
+               flush &= ~REQ_FUA;
+
+       q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush);
+
 static int __init blk_settings_init(void)
 {
        blk_max_low_pfn = max_low_pfn - 1;
 
        if (!brd->brd_queue)
                goto out_free_dev;
        blk_queue_make_request(brd->brd_queue, brd_make_request);
-       blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_DRAIN);
        blk_queue_max_hw_sectors(brd->brd_queue, 1024);
        blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
 
        lo->lo_queue->unplug_fn = loop_unplug;
 
        if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
-               blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN_FLUSH);
+               blk_queue_flush(lo->lo_queue, REQ_FLUSH);
 
        set_capacity(lo->lo_disk, size);
        bd_set_size(bdev, size << 9);
 
        blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
 
        blk_queue_prep_rq(q, blk_queue_start_tag);
-       blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
+       blk_queue_flush(q, REQ_FLUSH);
 
        disk->queue = q;
 
 
        blk_queue_dma_alignment(queue, dev->blk_size-1);
        blk_queue_logical_block_size(queue, dev->blk_size);
 
-       blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH);
+       blk_queue_flush(queue, REQ_FLUSH);
 
        blk_queue_max_segments(queue, -1);
        blk_queue_max_segment_size(queue, dev->bounce_size);
 
        vblk->disk->driverfs_dev = &vdev->dev;
        index++;
 
-       if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) {
-               /*
-                * If the FLUSH feature is supported we do have support for
-                * flushing a volatile write cache on the host.  Use that
-                * to implement write barrier support.
-                */
-               blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
-       } else {
-               /*
-                * If the FLUSH feature is not supported we must assume that
-                * the host does not perform any kind of volatile write
-                * caching. We still need to drain the queue to provider
-                * proper barrier semantics.
-                */
-               blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
-       }
+       /*
+        * If the FLUSH feature is supported we do have support for
+        * flushing a volatile write cache on the host.  Use that to
+        * implement write barrier support; otherwise, we must assume
+        * that the host does not perform any kind of volatile write
+        * caching.
+        */
+       if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
+               blk_queue_flush(q, REQ_FLUSH);
 
        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
 
        struct gnttab_free_callback callback;
        struct blk_shadow shadow[BLK_RING_SIZE];
        unsigned long shadow_free;
-       int feature_barrier;
+       unsigned int feature_flush;
        int is_ready;
 };
 
 }
 
 
-static int xlvbd_barrier(struct blkfront_info *info)
+static void xlvbd_flush(struct blkfront_info *info)
 {
-       int err;
-       const char *barrier;
-
-       switch (info->feature_barrier) {
-       case QUEUE_ORDERED_DRAIN:       barrier = "enabled"; break;
-       case QUEUE_ORDERED_NONE:        barrier = "disabled"; break;
-       default:                        return -EINVAL;
-       }
-
-       err = blk_queue_ordered(info->rq, info->feature_barrier);
-
-       if (err)
-               return err;
-
+       blk_queue_flush(info->rq, info->feature_flush);
        printk(KERN_INFO "blkfront: %s: barriers %s\n",
-              info->gd->disk_name, barrier);
-       return 0;
+              info->gd->disk_name,
+              info->feature_flush ? "enabled" : "disabled");
 }
 
 
        info->rq = gd->queue;
        info->gd = gd;
 
-       xlvbd_barrier(info);
+       xlvbd_flush(info);
 
        if (vdisk_info & VDISK_READONLY)
                set_disk_ro(gd, 1);
                                printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
                                       info->gd->disk_name);
                                error = -EOPNOTSUPP;
-                               info->feature_barrier = QUEUE_ORDERED_NONE;
-                               xlvbd_barrier(info);
+                               info->feature_flush = 0;
+                               xlvbd_flush(info);
                        }
                        /* fall through */
                case BLKIF_OP_READ:
        /*
         * If there's no "feature-barrier" defined, then it means
         * we're dealing with a very old backend which writes
-        * synchronously; draining will do what needs to get done.
+        * synchronously; nothing to do.
         *
         * If there are barriers, then we use flush.
-        *
-        * If barriers are not supported, then there's no much we can
-        * do, so just set ordering to NONE.
         */
-       if (err)
-               info->feature_barrier = QUEUE_ORDERED_DRAIN;
-       else if (barrier)
-               info->feature_barrier = QUEUE_ORDERED_DRAIN_FLUSH;
-       else
-               info->feature_barrier = QUEUE_ORDERED_NONE;
+       info->feature_flush = 0;
+       if (!err && barrier)
+               info->feature_flush = REQ_FLUSH;
 
        err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
        if (err) {
 
        return ide_no_data_taskfile(drive, &cmd);
 }
 
-static void update_ordered(ide_drive_t *drive)
+static void update_flush(ide_drive_t *drive)
 {
        u16 *id = drive->id;
-       unsigned ordered = QUEUE_ORDERED_NONE;
+       unsigned flush = 0;
 
        if (drive->dev_flags & IDE_DFLAG_WCACHE) {
                unsigned long long capacity;
                       drive->name, barrier ? "" : "not ");
 
                if (barrier) {
-                       ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+                       flush = REQ_FLUSH;
                        blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
                }
-       } else
-               ordered = QUEUE_ORDERED_DRAIN;
+       }
 
-       blk_queue_ordered(drive->queue, ordered);
+       blk_queue_flush(drive->queue, flush);
 }
 
 ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
                }
        }
 
-       update_ordered(drive);
+       update_flush(drive);
 
        return err;
 }
 
        blk_queue_softirq_done(md->queue, dm_softirq_done);
        blk_queue_prep_rq(md->queue, dm_prep_fn);
        blk_queue_lld_busy(md->queue, dm_lld_busy);
-       blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+       blk_queue_flush(md->queue, REQ_FLUSH);
 
        elv_register_queue(md->queue);
 
 
        mq->req = NULL;
 
        blk_queue_prep_rq(mq->queue, mmc_prep_request);
-       blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        if (mmc_can_erase(card)) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
 
         */
        blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
        blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
-       blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN);
 }
 
 /*
 
        struct scsi_disk *sdkp = scsi_disk(disk);
        struct scsi_device *sdp = sdkp->device;
        unsigned char *buffer;
-       unsigned ordered;
+       unsigned flush = 0;
 
        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
                                      "sd_revalidate_disk\n"));
 
        /*
         * We now have all cache related info, determine how we deal
-        * with ordered requests.
+        * with flush requests.
         */
-       if (sdkp->WCE)
-               ordered = sdkp->DPOFUA
-                       ? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH;
-       else
-               ordered = QUEUE_ORDERED_DRAIN;
+       if (sdkp->WCE) {
+               flush |= REQ_FLUSH;
+               if (sdkp->DPOFUA)
+                       flush |= REQ_FUA;
+       }
 
-       blk_queue_ordered(sdkp->disk->queue, ordered);
+       blk_queue_flush(sdkp->disk->queue, flush);
 
        set_capacity(disk, sdkp->capacity);
        kfree(buffer);
 
        struct blk_trace        *blk_trace;
 #endif
        /*
-        * reserved for flush operations
+        * for flush operations
         */
+       unsigned int            flush_flags;
+
        unsigned int            ordered, next_ordered, ordseq;
        int                     orderr, ordcolor;
        struct request          pre_flush_rq, bar_rq, post_flush_rq;
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned);
 extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);