block: add ability to flag write back caching on a device
author     Jens Axboe <axboe@fb.com>
           Tue, 12 Apr 2016 18:32:46 +0000 (12:32 -0600)
committer  Chuck Anderson <chuck.anderson@oracle.com>
           Thu, 1 Jun 2017 20:41:15 +0000 (13:41 -0700)
Add an internal helper and flag for setting whether a queue has
write back caching, or write through (or none). Add a sysfs file
to show this as well, and make it changeable from user space.

This will replace the (awkward) blk_queue_flush() interface that
drivers currently use to inform the block layer of write cache state
and capabilities.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
(cherry picked from commit 93e9d8e836cb1a9a58b33eb6643bf061c6119ef2)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
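
As an editorial sketch of the interface change described above (not part of the patch itself): a driver that today advertises its cache through blk_queue_flush() could move to the new helper roughly as follows. The function name and the "vwc" (volatile write cache) parameter are made up for illustration.

/*
 * Editorial sketch, not part of this patch: a hypothetical driver's
 * cache setup converted from the old blk_queue_flush() interface to
 * the new blk_queue_write_cache() helper.
 */
#include <linux/blkdev.h>

static void mydrv_setup_cache(struct request_queue *q, bool vwc, bool fua)
{
	/* Old interface: describe the cache via flush capability flags. */
	/* blk_queue_flush(q, (vwc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0)); */

	/* New interface: state the cache mode and FUA support directly. */
	blk_queue_write_cache(q, vwc, fua);
}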
Documentation/block/queue-sysfs.txt
block/blk-settings.c
block/blk-sysfs.c
include/linux/blkdev.h

diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 3a29f8914df9cea8a27048960f3b8b489e8d3ce4..7ab91af2a160a461f2e2502339de87d8a365f9f0 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -133,6 +133,15 @@ control of this block device to that new IO scheduler. Note that writing
 an IO scheduler name to this file will attempt to load that IO scheduler
 module, if it isn't already present in the system.
 
+write_cache (RW)
+----------------
+When read, this file will display whether the device has write back
+caching enabled or not. It will return "write back" for the former
+case, and "write through" for the latter. Writing to this file can
+change the kernel's view of the device, but it doesn't alter the
+device state. This means that it might not be safe to toggle the
+setting from "write back" to "write through", since that will also
+eliminate cache flushes issued by the kernel.
 
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
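
For concreteness, here is a hedged user-space sketch (not part of this patch) that reads and rewrites the new attribute. "sda" is only an example device name; the accepted strings are the ones handled by the show/store routines this patch adds in block/blk-sysfs.c.

/*
 * Editorial sketch: query and set /sys/block/<dev>/queue/write_cache
 * from user space. "sda" is just an example.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/write_cache";
	char buf[32];
	FILE *f;

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("current mode: %s", buf); /* "write back" or "write through" */
	fclose(f);

	/*
	 * Per the documentation above, writing only changes the kernel's
	 * view, not the device itself; switching a device that really has
	 * a volatile cache to "write through" drops the flushes the kernel
	 * would otherwise issue.
	 */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("write back\n", f);
	fclose(f);
	return 0;
}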
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 7259915352a6f688675f9d8932cc613785ac1343..5891a1cba4711b1ac6303a94268e61c8e63f8af4 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -879,6 +879,32 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q:         the request queue for the device
+ * @wc:                write back cache on or off
+ * @fua:       device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+       spin_lock_irq(q->queue_lock);
+       if (wc) {
+               queue_flag_set(QUEUE_FLAG_WC, q);
+               q->flush_flags = REQ_FLUSH;
+       } else
+               queue_flag_clear(QUEUE_FLAG_WC, q);
+       if (fua) {
+               if (wc)
+                       q->flush_flags |= REQ_FUA;
+               queue_flag_set(QUEUE_FLAG_FUA, q);
+       } else
+               queue_flag_clear(QUEUE_FLAG_FUA, q);
+       spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
        blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 4cdef0b69bda6f0479ed917936b85b67ce72a332..f73b8565d83b0ecb2cd7aecc701652864319303b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -288,6 +288,38 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
        return ret;
 }
 
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+       if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+               return sprintf(page, "write back\n");
+
+       return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+                             size_t count)
+{
+       int set = -1;
+
+       if (!strncmp(page, "write back", 10))
+               set = 1;
+       else if (!strncmp(page, "write through", 13) ||
+                !strncmp(page, "none", 4))
+               set = 0;
+
+       if (set == -1)
+               return -EINVAL;
+
+       spin_lock_irq(q->queue_lock);
+       if (set)
+               queue_flag_set(QUEUE_FLAG_WC, q);
+       else
+               queue_flag_clear(QUEUE_FLAG_WC, q);
+       spin_unlock_irq(q->queue_lock);
+
+       return count;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@ -407,6 +439,12 @@ static struct queue_sysfs_entry queue_random_entry = {
        .store = queue_store_random,
 };
 
+static struct queue_sysfs_entry queue_wc_entry = {
+       .attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_wc_show,
+       .store = queue_wc_store,
+};
+
 static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
@@ -430,6 +468,7 @@ static struct attribute *default_attrs[] = {
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
+       &queue_wc_entry.attr,
        NULL,
 };
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c96bf856579c8b9a266a83bea8487d14910f23b5..b119db52d1620e5134f164d9b57ccb6dfec683f2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -542,6 +542,9 @@ struct request_queue {
 #define QUEUE_FLAG_INIT_DONE   20      /* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21      /* don't attempt to merge SG segments*/
 #define QUEUE_FLAG_SG_GAPS     22      /* queue doesn't support SG gaps */
+#define QUEUE_FLAG_POLL               23       /* IO polling enabled if set */
+#define QUEUE_FLAG_WC         24       /* Write back caching */
+#define QUEUE_FLAG_FUA        25       /* device supports FUA writes */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -1074,6 +1077,7 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);