[3/6] block: add ability to flag write back caching on a device

Message ID 1458669320-6819-4-git-send-email-axboe@fb.com (mailing list archive)
State New, archived

Commit Message

Jens Axboe March 22, 2016, 5:55 p.m. UTC
Add an internal helper and flag for setting whether a queue has
write back caching, or write through (or none). Add a sysfs file
to show this as well, and make it changeable from user space.

Signed-off-by: Jens Axboe <axboe@fb.com>
---
 block/blk-settings.c   | 11 +++++++++++
 block/blk-sysfs.c      | 40 ++++++++++++++++++++++++++++++++++++++++
 include/linux/blkdev.h |  4 ++++
 3 files changed, 55 insertions(+)
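
As a usage illustration (not part of this series), a driver whose device
has a volatile write back cache would call the new helper from its queue
setup path; a minimal sketch, where the function and parameter names are
hypothetical:

#include <linux/blkdev.h>

static void mydrv_init_queue(struct request_queue *q, bool has_wb_cache)
{
	/*
	 * Mark the queue write back if the device caches writes,
	 * write through otherwise.
	 */
	blk_queue_write_cache(q, has_wb_cache);
}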

Comments

Christoph Hellwig March 22, 2016, 6:57 p.m. UTC | #1
On Tue, Mar 22, 2016 at 11:55:17AM -0600, Jens Axboe wrote:
> Add an internal helper and flag for setting whether a queue has
> write back caching, or write through (or none). Add a sysfs file
> to show this as well, and make it changeable from user space.

We do this by passing the REQ_FLUSH flag to blk_queue_flush today.
While that's not a great interface, adding a second one doesn't make it
any better :)
Jens Axboe March 22, 2016, 6:59 p.m. UTC | #2
On 03/22/2016 12:57 PM, Christoph Hellwig wrote:
> On Tue, Mar 22, 2016 at 11:55:17AM -0600, Jens Axboe wrote:
>> Add an internal helper and flag for setting whether a queue has
>> write back caching, or write through (or none). Add a sysfs file
>> to show this as well, and make it changeable from user space.
>
> We do this by passing the REQ_FLUSH flag to blk_queue_flush today.
> While that's not a great interface, adding a second one doesn't make it
> any better :)

I think the newer one is cleaner, so it would make more sense to put the
flush part on top.
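
For reference, the two call styles under discussion, side by side; both
lines are illustrative and would sit in a driver's queue setup path:

	/* existing interface: cache behavior implied by the flush flags */
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);

	/* this series: write back caching flagged explicitly */
	blk_queue_write_cache(q, true);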

Patch

diff --git a/block/blk-settings.c b/block/blk-settings.c
index c7bb666aafd1..4dbd511a9889 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -846,6 +846,17 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+void blk_queue_write_cache(struct request_queue *q, bool enabled)
+{
+	spin_lock_irq(q->queue_lock);
+	if (enabled)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index dd93763057ce..deb2270bf1f3 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -347,6 +347,39 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	return ret;
 }
 
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+		return sprintf(page, "write back\n");
+
+	return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+			      size_t count)
+{
+	ssize_t ret = count;
+	int set = -1;
+
+	if (!strncmp(page, "write back", 10))
+		set = 1;
+	else if (!strncmp(page, "write through", 13) ||
+		 !strncmp(page, "none", 4))
+		set = 0;
+
+	if (set == -1)
+		return -EINVAL;
+
+	spin_lock_irq(q->queue_lock);
+	if (set)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -478,6 +511,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
 	.store = queue_poll_store,
 };
 
+static struct queue_sysfs_entry queue_wc_entry = {
+	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_wc_show,
+	.store = queue_wc_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -503,6 +542,7 @@ static struct attribute *default_attrs[] = {
 	&queue_iostats_entry.attr,
 	&queue_random_entry.attr,
 	&queue_poll_entry.attr,
+	&queue_wc_entry.attr,
 	NULL,
 };
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 930bd4c5b7ff..da5e85c35318 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -491,15 +491,18 @@ struct request_queue {
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
 #define QUEUE_FLAG_POLL	       22	/* IO polling enabled if set */
+#define QUEUE_FLAG_WC	       23	/* Write back caching */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
+				 (1 << QUEUE_FLAG_WC)		|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
+				 (1 << QUEUE_FLAG_WC)		|	\
 				 (1 << QUEUE_FLAG_POLL))
 
 static inline void queue_lockdep_assert_held(struct request_queue *q)
@@ -1009,6 +1012,7 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+extern void blk_queue_write_cache(struct request_queue *q, bool enabled);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
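
The new attribute behaves like any other sysfs file, so user space can
query the cache mode with plain file I/O; a minimal sketch, assuming a
device named sda (substitute the device of interest):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/write_cache";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	/* prints "write back" or "write through", per queue_wc_show() */
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}

Writing "write back", "write through", or "none" to the same file flips
QUEUE_FLAG_WC via queue_wc_store() above.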