return bio_max_segs(howmany(count, PAGE_SIZE));
 }
 
+static void
+xfs_flush_bdev_async_endio(
+       struct bio      *bio)
+{
+       complete(bio->bi_private);
+}
+
+/*
+ * Submit an async cache flush request. If the request queue does not require
+ * flush operations, skip the flush altogether and signal completion
+ * immediately. The caller must supply the bio to issue and a valid
+ * completion; the completion is signalled when the flush completes, and the
+ * bio must not be reused or freed until then.
+ */
+void
+xfs_flush_bdev_async(
+       struct bio              *bio,
+       struct block_device     *bdev,
+       struct completion       *done)
+{
+       struct request_queue    *q = bdev->bd_disk->queue;
+
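+       /*
+        * A device without a volatile write cache has nothing to flush,
+        * so signal completion immediately and skip issuing the bio.
+        */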
+       if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
+               complete(done);
+               return;
+       }
+
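+       /* A bio with no data and REQ_PREFLUSH set issues a pure cache flush. */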
+       bio_init(bio, NULL, 0);
+       bio_set_dev(bio, bdev);
+       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+       bio->bi_private = done;
+       bio->bi_end_io = xfs_flush_bdev_async_endio;
+
+       submit_bio(bio);
+}
+
 int
 xfs_rw_bdev(
        struct block_device     *bdev,
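
A minimal usage sketch, not part of the patch: the caller embeds both the bio
and the completion so they stay valid for the lifetime of the flush, issues the
flush without blocking, and waits on the completion only at the point where the
flush must actually have finished. The flush_ctx structure and the function
names below are hypothetical, for illustration only.

#include <linux/bio.h>
#include <linux/completion.h>

/* Hypothetical caller context; names are illustrative only. */
struct flush_ctx {
        struct bio              flush_bio;
        struct completion       flush_done;
};

static void flush_example(struct block_device *bdev)
{
        struct flush_ctx        ctx;

        init_completion(&ctx.flush_done);

        /* Kick off the cache flush; this call does not block. */
        xfs_flush_bdev_async(&ctx.flush_bio, bdev, &ctx.flush_done);

        /* ... overlap other work with the flush here ... */

        /*
         * Wait until the flush (or the no-write-cache early completion)
         * has been signalled; only then may the bio be reused or freed.
         */
        wait_for_completion(&ctx.flush_done);
}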
 
 
 int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
                char *data, unsigned int op);
+void xfs_flush_bdev_async(struct bio *bio, struct block_device *bdev,
+               struct completion *done);
 
 #define ASSERT_ALWAYS(expr)    \
        (likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))