@@ -93,6 +93,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->virt_boundary_mask = 0;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+ lim->max_atomic_write_sectors = 0;
lim->max_dev_sectors = 0;
lim->chunk_sectors = 0;
lim->max_write_same_sectors = 0;
@@ -129,6 +130,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->discard_zeroes_data = 1;
lim->max_segments = USHRT_MAX;
lim->max_hw_sectors = UINT_MAX;
+ lim->max_atomic_write_sectors = 0;
lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
lim->max_dev_sectors = UINT_MAX;
@@ -258,6 +260,24 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
/**
+ * blk_queue_max_atomic_write_sectors - maximum sectors written atomically
+ * @q: the request queue for the device
+ * @max_atomic_write_sectors: max atomic write sectors in the usual 512b unit
+ *
+ * Description:
+ * Enables a low level driver to advertise that it supports writing
+ * multi-sector I/O atomically. A driver with atomic write requirements
+ * beyond a maximum size must leave this field unset (zero) so that
+ * multi-sector atomic writes are not advertised.
+ **/
+void blk_queue_max_atomic_write_sectors(struct request_queue *q,
+ unsigned int max_atomic_write_sectors)
+{
+ q->limits.max_atomic_write_sectors = max_atomic_write_sectors;
+}
+EXPORT_SYMBOL_GPL(blk_queue_max_atomic_write_sectors);
+
+/**
* blk_queue_chunk_sectors - set size of the chunk for this queue
* @q: the request queue for the device
* @chunk_sectors: chunk sectors in the usual 512b unit
@@ -541,6 +561,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
+ /* no support for stacking atomic writes */
+ t->max_atomic_write_sectors = 0;
t->max_write_same_sectors = min(t->max_write_same_sectors,
b->max_write_same_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
@@ -249,6 +249,12 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_hw_sectors_kb, (page));
}
+static ssize_t queue_max_atomic_write_sectors_show(struct request_queue *q,
+ char *page)
+{ /* report in kB: one 512b sector is half a kB, hence >> 1 */
+ return queue_var_show(queue_max_atomic_write_sectors(q) >> 1, page);
+}
+
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_show_##name(struct request_queue *q, char *page) \
@@ -540,6 +546,11 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
.show = queue_max_hw_sectors_show,
};
+static struct queue_sysfs_entry queue_max_atomic_write_sectors_entry = {
+ .attr = {.name = "max_atomic_write_sectors_kb", .mode = S_IRUGO },
+ .show = queue_max_atomic_write_sectors_show,
+};
+
static struct queue_sysfs_entry queue_max_segments_entry = {
.attr = {.name = "max_segments", .mode = S_IRUGO },
.show = queue_max_segments_show,
@@ -695,6 +706,7 @@ static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
+ &queue_max_atomic_write_sectors_entry.attr,
&queue_max_sectors_entry.attr,
&queue_max_segments_entry.attr,
&queue_max_integrity_segments_entry.attr,
@@ -323,6 +323,7 @@ struct queue_limits {
unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
+ unsigned int max_atomic_write_sectors;
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
unsigned int max_write_same_sectors;
@@ -1135,6 +1136,8 @@ extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_atomic_write_sectors(struct request_queue *,
+ unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
@@ -1371,6 +1374,12 @@ static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
return q->limits.max_hw_sectors;
}
+static inline unsigned int queue_max_atomic_write_sectors(
+ struct request_queue *q)
+{
+ return q->limits.max_atomic_write_sectors; /* 512b sector units */
+}
+
static inline unsigned short queue_max_segments(struct request_queue *q)
{
return q->limits.max_segments;
Signed-off-by: Christoph Hellwig <hch@lst.de> --- block/blk-settings.c | 22 ++++++++++++++++++++++ block/blk-sysfs.c | 12 ++++++++++++ include/linux/blkdev.h | 9 +++++++++ 3 files changed, 43 insertions(+)