@@ -21,6 +21,58 @@ Description:
device is offset from the internal allocation unit's
natural alignment.
+What: /sys/block/<disk>/atomic_write_max_bytes
+Date: February 2024
+Contact: Himanshu Madhani <himanshu.madhani@oracle.com>
+Description:
+ [RO] This parameter specifies the maximum atomic write
+ size reported by the device. This parameter is relevant
+ for merging of writes, where a merged atomic write
+ operation must not exceed this number of bytes.
+		This parameter may be greater than the value in
+		atomic_write_unit_max_bytes, since atomic_write_unit_max_bytes
+		is rounded down to a power-of-two and may also be limited by
+		other queue limits, such as max_segments.
+ This parameter - along with atomic_write_unit_min_bytes
+ and atomic_write_unit_max_bytes - will not be larger than
+ max_hw_sectors_kb, but may be larger than max_sectors_kb.
+
+
+What: /sys/block/<disk>/atomic_write_unit_min_bytes
+Date: February 2024
+Contact: Himanshu Madhani <himanshu.madhani@oracle.com>
+Description:
+ [RO] This parameter specifies the smallest block which can
+ be written atomically with an atomic write operation. All
+		atomic write operations must begin at an
+ atomic_write_unit_min boundary and must be multiples of
+ atomic_write_unit_min. This value must be a power-of-two.
+
+
+What: /sys/block/<disk>/atomic_write_unit_max_bytes
+Date: February 2024
+Contact: Himanshu Madhani <himanshu.madhani@oracle.com>
+Description:
+ [RO] This parameter defines the largest block which can be
+ written atomically with an atomic write operation. This
+ value must be a multiple of atomic_write_unit_min and must
+ be a power-of-two. This value will not be larger than
+ atomic_write_max_bytes.
+
+
+What: /sys/block/<disk>/atomic_write_boundary_bytes
+Date: February 2024
+Contact: Himanshu Madhani <himanshu.madhani@oracle.com>
+Description:
+ [RO] A device may need to internally split I/Os which
+ straddle a given logical block address boundary. In that
+ case a single atomic write operation will be processed as
+		one or more sub-operations which each complete atomically.
+ This parameter specifies the size in bytes of the atomic
+ boundary if one is reported by the device. This value must
+ be a power-of-two.
+
What: /sys/block/<disk>/diskseq
Date: February 2021
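
As a rough illustration of how the atomic_write_* attributes documented above fit together, a userspace program could read the advertised limits and check a candidate write length against them before attempting an atomic write. The sketch below is illustrative only and not part of the patch; the disk name and write length are arbitrary examples:

/* Illustrative only: check a write length against the advertised limits. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long read_limit(const char *disk, const char *attr)
{
	char path[128];
	unsigned long val = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/%s", disk, attr);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	unsigned long unit_min = read_limit("sda", "atomic_write_unit_min_bytes");
	unsigned long unit_max = read_limit("sda", "atomic_write_unit_max_bytes");
	unsigned long len = 8192;	/* candidate atomic write length */
	bool ok = unit_min && len >= unit_min && len <= unit_max &&
		  len % unit_min == 0;

	printf("%lu-byte atomic write %s within the advertised limits\n",
	       len, ok ? "is" : "is not");
	return 0;
}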
@@ -174,6 +174,8 @@ static const struct {
/* Command duration limit device-side timeout */
[BLK_STS_DURATION_LIMIT] = { -ETIME, "duration limit exceeded" },
+ [BLK_STS_INVAL] = { -EINVAL, "invalid" },
+
/* everything else not covered above: */
[BLK_STS_IOERR] = { -EIO, "I/O" },
};
@@ -729,6 +731,18 @@ void submit_bio_noacct_nocheck(struct bio *bio)
__submit_bio_noacct(bio);
}
+static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
+ struct bio *bio)
+{
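+	/*
+	 * An atomic write must not exceed the advertised unit max, and its
+	 * length must be a multiple of the advertised unit min.
+	 */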
+ if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
+ return BLK_STS_INVAL;
+
+ if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
+ return BLK_STS_INVAL;
+
+ return BLK_STS_OK;
+}
+
/**
* submit_bio_noacct - re-submit a bio to the block device layer for I/O
* @bio: The bio describing the location in memory and on the device.
@@ -786,6 +800,11 @@ void submit_bio_noacct(struct bio *bio)
switch (bio_op(bio)) {
case REQ_OP_READ:
case REQ_OP_WRITE:
+ if (bio->bi_opf & REQ_ATOMIC) {
+ status = blk_validate_atomic_write_op_size(q, bio);
+ if (status != BLK_STS_OK)
+ goto end_io;
+ }
break;
case REQ_OP_FLUSH:
/*
@@ -18,6 +18,46 @@
#include "blk-rq-qos.h"
#include "blk-throttle.h"
+/*
+ * rq_straddles_atomic_write_boundary - check for boundary violation
+ * @rq: request to check
+ * @front_adjust: data size to be prepended to the front
+ * @back_adjust: data size to be appended to the back
+ *
+ * Determine whether merging a request or bio into another request will result
+ * in a merged request which straddles an atomic write boundary.
+ *
+ * The value @front_adjust is the data which would be appended to the front of
+ * @rq, while the value @back_adjust is the data which would be appended to the
+ * back of @rq. Callers will typically only have either @front_adjust or
+ * @back_adjust as non-zero.
+ *
+ */
+static bool rq_straddles_atomic_write_boundary(struct request *rq,
+ unsigned int front_adjust,
+ unsigned int back_adjust)
+{
+ unsigned int boundary = queue_atomic_write_boundary_bytes(rq->q);
+ u64 mask, start_rq_pos, end_rq_pos;
+
+ if (!boundary)
+ return false;
+
+ start_rq_pos = blk_rq_pos(rq) << SECTOR_SHIFT;
+ end_rq_pos = start_rq_pos + blk_rq_bytes(rq) - 1;
+
+ start_rq_pos -= front_adjust;
+ end_rq_pos += back_adjust;
+
+ mask = ~(boundary - 1);
+
+ /* Top bits are different, so crossed a boundary */
+ if ((start_rq_pos & mask) != (end_rq_pos & mask))
+ return true;
+
+ return false;
+}
+
static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
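
To make the mask arithmetic in rq_straddles_atomic_write_boundary() concrete, the standalone sketch below (illustrative only, not part of the patch) applies the same test to a plain byte range and works through one crossing and one non-crossing case:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same mask test as above: true if [start, start + len) crosses a boundary. */
static bool straddles(uint64_t start, uint64_t len, uint64_t boundary)
{
	uint64_t mask = ~(boundary - 1);	/* boundary is a power of two */

	if (!boundary || !len)
		return false;
	return (start & mask) != ((start + len - 1) & mask);
}

int main(void)
{
	/* 16 KiB at offset 56 KiB spans 56 KiB..72 KiB - 1 and crosses the
	 * 64 KiB boundary, so such a merge would be refused. */
	assert(straddles(56 * 1024, 16 * 1024, 64 * 1024));
	/* The same write at offset 64 KiB stays inside one boundary window. */
	assert(!straddles(64 * 1024, 16 * 1024, 64 * 1024));
	return 0;
}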
@@ -167,7 +207,16 @@ static inline unsigned get_max_io_size(struct bio *bio,
{
unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
- unsigned max_sectors = lim->max_sectors, start, end;
+ unsigned max_sectors, start, end;
+
+ /*
+ * We ignore lim->max_sectors for atomic writes simply because
+	 * it may be less than the bio size, which we cannot tolerate.
+ */
+ if (bio->bi_opf & REQ_ATOMIC)
+ max_sectors = lim->atomic_write_max_sectors;
+ else
+ max_sectors = lim->max_sectors;
if (lim->chunk_sectors) {
max_sectors = min(max_sectors,
@@ -305,6 +354,11 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
*segs = nsegs;
return NULL;
split:
+ if (bio->bi_opf & REQ_ATOMIC) {
+ bio->bi_status = BLK_STS_INVAL;
+ bio_endio(bio);
+ return ERR_PTR(-EINVAL);
+ }
/*
* We can't sanely support splitting for a REQ_NOWAIT bio. End it
* with EAGAIN if splitting is required and return an error pointer.
@@ -645,6 +699,13 @@ int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
return 0;
}
+ if (req->cmd_flags & REQ_ATOMIC) {
+ if (rq_straddles_atomic_write_boundary(req,
+ 0, bio->bi_iter.bi_size)) {
+ return 0;
+ }
+ }
+
return ll_new_hw_segment(req, bio, nr_segs);
}
@@ -664,6 +725,13 @@ static int ll_front_merge_fn(struct request *req, struct bio *bio,
return 0;
}
+ if (req->cmd_flags & REQ_ATOMIC) {
+ if (rq_straddles_atomic_write_boundary(req,
+ bio->bi_iter.bi_size, 0)) {
+ return 0;
+ }
+ }
+
return ll_new_hw_segment(req, bio, nr_segs);
}
@@ -700,6 +768,13 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
blk_rq_get_max_sectors(req, blk_rq_pos(req)))
return 0;
+ if (req->cmd_flags & REQ_ATOMIC) {
+ if (rq_straddles_atomic_write_boundary(req,
+ 0, blk_rq_bytes(next))) {
+ return 0;
+ }
+ }
+
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
if (total_phys_segments > blk_rq_get_max_segments(req))
return 0;
@@ -795,6 +870,18 @@ static enum elv_merge blk_try_req_merge(struct request *req,
return ELEVATOR_NO_MERGE;
}
+static bool blk_atomic_write_mergeable_rq_bio(struct request *rq,
+ struct bio *bio)
+{
+ return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC);
+}
+
+static bool blk_atomic_write_mergeable_rqs(struct request *rq,
+ struct request *next)
+{
+ return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
+}
+
/*
* For non-mq, this has to be called with the request spinlock acquired.
* For mq with scheduling, the appropriate queue wide lock should be held.
@@ -818,6 +905,9 @@ static struct request *attempt_merge(struct request_queue *q,
if (req->ioprio != next->ioprio)
return NULL;
+ if (!blk_atomic_write_mergeable_rqs(req, next))
+ return NULL;
+
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -949,6 +1039,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (rq->ioprio != bio_prio(bio))
return false;
+	if (!blk_atomic_write_mergeable_rq_bio(rq, bio))
+ return false;
+
return true;
}
@@ -97,6 +97,41 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
return 0;
}
+/*
+ * Returns max guaranteed bytes which we can fit in a bio.
+ *
+ * We always assume that we can fit at least PAGE_SIZE in a segment, apart
+ * from first and last segments.
+ */
+static
+unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *limits)
+{
+ unsigned int max_segments = min(BIO_MAX_VECS, limits->max_segments);
+ unsigned int length;
+
+ length = min(max_segments, 2) * limits->logical_block_size;
+ if (max_segments > 2)
+ length += (max_segments - 2) * PAGE_SIZE;
+
+ return length;
+}
+
+static void blk_atomic_writes_update_limits(struct queue_limits *limits)
+{
+ unsigned int unit_limit = min(limits->max_hw_sectors << SECTOR_SHIFT,
+ blk_queue_max_guaranteed_bio(limits));
+
+ unit_limit = rounddown_pow_of_two(unit_limit);
+
+ limits->atomic_write_max_sectors =
+ min(limits->atomic_write_hw_max >> SECTOR_SHIFT,
+ limits->max_hw_sectors);
+ limits->atomic_write_unit_min =
+ min(limits->atomic_write_hw_unit_min, unit_limit);
+ limits->atomic_write_unit_max =
+ min(limits->atomic_write_hw_unit_max, unit_limit);
+}
+
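
As a worked example of the limits derived above (numbers chosen purely for illustration): with max_segments = 128, BIO_MAX_VECS = 256 and a 512-byte logical block size on a 4 KiB-page system, blk_queue_max_guaranteed_bio() returns 2 * 512 + 126 * 4096 = 517120 bytes. With max_hw_sectors = 2048 (1 MiB), unit_limit = rounddown_pow_of_two(min(1048576, 517120)) = 262144, so atomic_write_unit_min and atomic_write_unit_max are each capped at 256 KiB even if the hardware reports larger units.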
/*
* Check that the limits in lim are valid, initialize defaults for unset
* values, and cap values based on others where needed.
@@ -221,6 +256,23 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->misaligned = 0;
}
+ /*
+ * The atomic write boundary size just needs to be a multiple of
+	 * unit_max (and not necessarily a power-of-2), so the following check
+	 * could be relaxed in the future.
+	 * Furthermore, if needed, unit_max could be reduced so that the
+	 * boundary size is compliant (with a non-power-of-2 boundary).
+ */
+ if (lim->atomic_write_hw_boundary &&
+ !is_power_of_2(lim->atomic_write_hw_boundary)) {
+
+ lim->atomic_write_hw_max = 0;
+ lim->atomic_write_hw_boundary = 0;
+ lim->atomic_write_hw_unit_min = 0;
+ lim->atomic_write_hw_unit_max = 0;
+ }
+ blk_atomic_writes_update_limits(lim);
+
return blk_validate_zoned_limits(lim);
}
@@ -344,6 +396,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
limits->logical_block_size >> SECTOR_SHIFT);
limits->max_sectors = max_sectors;
+ blk_atomic_writes_update_limits(limits);
+
if (!q->disk)
return;
q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
@@ -384,6 +438,61 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
+/**
+ * blk_queue_atomic_write_max_bytes - set max bytes supported by
+ * the device for atomic write operations.
+ * @q: the request queue for the device
+ * @bytes: maximum bytes supported
+ */
+void blk_queue_atomic_write_max_bytes(struct request_queue *q,
+ unsigned int bytes)
+{
+ q->limits.atomic_write_hw_max = bytes;
+ blk_atomic_writes_update_limits(&q->limits);
+}
+EXPORT_SYMBOL(blk_queue_atomic_write_max_bytes);
+
+/**
+ * blk_queue_atomic_write_boundary_bytes - set the boundary within the device's
+ * logical block address space which an atomic write should not cross.
+ * @q: the request queue for the device
+ * @bytes: must be a power-of-two.
+ */
+void blk_queue_atomic_write_boundary_bytes(struct request_queue *q,
+ unsigned int bytes)
+{
+ q->limits.atomic_write_hw_boundary = bytes;
+}
+EXPORT_SYMBOL(blk_queue_atomic_write_boundary_bytes);
+
+/**
+ * blk_queue_atomic_write_unit_min_bytes - smallest unit that can be written
+ * atomically to the device.
+ * @q: the request queue for the device
+ * @bytes: must be a power-of-two.
+ */
+void blk_queue_atomic_write_unit_min_bytes(struct request_queue *q,
+ unsigned int bytes)
+{
+ q->limits.atomic_write_hw_unit_min = bytes;
+ blk_atomic_writes_update_limits(&q->limits);
+}
+EXPORT_SYMBOL(blk_queue_atomic_write_unit_min_bytes);
+
+/**
+ * blk_queue_atomic_write_unit_max_bytes - largest unit that can be written
+ * atomically to the device.
+ * @q: the request queue for the device
+ * @bytes: must be a power-of-two.
+ */
+void blk_queue_atomic_write_unit_max_bytes(struct request_queue *q,
+ unsigned int bytes)
+{
+ q->limits.atomic_write_hw_unit_max = bytes;
+ blk_atomic_writes_update_limits(&q->limits);
+}
+EXPORT_SYMBOL(blk_queue_atomic_write_unit_max_bytes);
+
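
For context, a driver that has discovered its hardware's atomic write capabilities might publish them through the helpers above roughly as follows. This is a hypothetical sketch; struct foo_device and its fields are invented for illustration:

/* Hypothetical probe-time fragment: advertise atomic write limits. */
static void foo_setup_atomic_writes(struct foo_device *dev,
				    struct request_queue *q)
{
	blk_queue_atomic_write_unit_min_bytes(q, dev->atomic_unit_min_bytes);
	blk_queue_atomic_write_unit_max_bytes(q, dev->atomic_unit_max_bytes);
	blk_queue_atomic_write_max_bytes(q, dev->atomic_max_bytes);
	blk_queue_atomic_write_boundary_bytes(q, dev->atomic_boundary_bytes);
}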
/**
* blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
* @q: the request queue for the device
@@ -118,6 +118,30 @@ static ssize_t queue_max_discard_segments_show(struct request_queue *q,
return queue_var_show(queue_max_discard_segments(q), page);
}
+static ssize_t queue_atomic_write_max_bytes_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(queue_atomic_write_max_bytes(q), page);
+}
+
+static ssize_t queue_atomic_write_boundary_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(queue_atomic_write_boundary_bytes(q), page);
+}
+
+static ssize_t queue_atomic_write_unit_min_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(queue_atomic_write_unit_min_bytes(q), page);
+}
+
+static ssize_t queue_atomic_write_unit_max_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(queue_atomic_write_unit_max_bytes(q), page);
+}
+
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
return queue_var_show(q->limits.max_integrity_segments, page);
@@ -495,6 +519,11 @@ QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
+QUEUE_RO_ENTRY(queue_atomic_write_max_bytes, "atomic_write_max_bytes");
+QUEUE_RO_ENTRY(queue_atomic_write_boundary, "atomic_write_boundary_bytes");
+QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
+QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
+
QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
@@ -622,6 +651,10 @@ static struct attribute *queue_attrs[] = {
&queue_discard_max_entry.attr,
&queue_discard_max_hw_entry.attr,
&queue_discard_zeroes_data_entry.attr,
+ &queue_atomic_write_max_bytes_entry.attr,
+ &queue_atomic_write_boundary_entry.attr,
+ &queue_atomic_write_unit_min_entry.attr,
+ &queue_atomic_write_unit_max_entry.attr,
&queue_write_same_max_entry.attr,
&queue_write_zeroes_max_entry.attr,
&queue_zone_append_max_entry.attr,
@@ -192,6 +192,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
if (unlikely(op == REQ_OP_WRITE_ZEROES))
return q->limits.max_write_zeroes_sectors;
+ if (rq->cmd_flags & REQ_ATOMIC)
+ return q->limits.atomic_write_max_sectors;
+
return q->limits.max_sectors;
}
@@ -179,6 +179,11 @@ typedef u16 blk_short_t;
*/
#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)18)
+/*
+ * Invalid size or alignment.
+ */
+#define BLK_STS_INVAL ((__force blk_status_t)19)
+
/**
* blk_path_error - returns true if error may be path related
* @error: status the request was completed with
@@ -381,7 +386,7 @@ enum req_flag_bits {
__REQ_SWAP, /* swap I/O */
__REQ_DRV, /* for driver use */
__REQ_FS_PRIVATE, /* for file system (submitter) use */
-
+ __REQ_ATOMIC, /* for atomic write operations */
/*
* Command specific flags, keep last:
*/
@@ -413,6 +418,7 @@ enum req_flag_bits {
#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
+#define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
@@ -300,6 +300,15 @@ struct queue_limits {
unsigned int discard_alignment;
unsigned int zone_write_granularity;
+ /* atomic write limits */
+ unsigned int atomic_write_hw_max;
+ unsigned int atomic_write_max_sectors;
+ unsigned int atomic_write_hw_boundary;
+ unsigned int atomic_write_hw_unit_min;
+ unsigned int atomic_write_unit_min;
+ unsigned int atomic_write_hw_unit_max;
+ unsigned int atomic_write_unit_max;
+
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
@@ -916,6 +925,14 @@ void blk_queue_zone_write_granularity(struct request_queue *q,
unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
+void blk_queue_atomic_write_max_bytes(struct request_queue *q,
+ unsigned int bytes);
+void blk_queue_atomic_write_boundary_bytes(struct request_queue *q,
+ unsigned int bytes);
+void blk_queue_atomic_write_unit_max_bytes(struct request_queue *q,
+ unsigned int bytes);
+void blk_queue_atomic_write_unit_min_bytes(struct request_queue *q,
+ unsigned int bytes);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
@@ -1339,6 +1356,30 @@ static inline int queue_dma_alignment(const struct request_queue *q)
return q ? q->limits.dma_alignment : 511;
}
+static inline unsigned int
+queue_atomic_write_unit_max_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_max;
+}
+
+static inline unsigned int
+queue_atomic_write_unit_min_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_min;
+}
+
+static inline unsigned int
+queue_atomic_write_boundary_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_hw_boundary;
+}
+
+static inline unsigned int
+queue_atomic_write_max_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
+}
+
static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
return queue_dma_alignment(bdev_get_queue(bdev));
@@ -1592,6 +1633,26 @@ struct io_comp_batch {
void (*complete)(struct io_comp_batch *);
};
+static inline bool bdev_can_atomic_write(struct block_device *bdev)
+{
+ struct request_queue *bd_queue = bdev->bd_queue;
+ struct queue_limits *limits = &bd_queue->limits;
+
+ if (!limits->atomic_write_unit_min)
+ return false;
+
+ if (bdev_is_partition(bdev)) {
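+		/*
+		 * A misaligned partition start would skew the caller's
+		 * otherwise-aligned atomic writes relative to the device's
+		 * atomic units and boundaries.
+		 */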
+ sector_t bd_start_sect = bdev->bd_start_sect;
+ unsigned int alignment =
+ max(limits->atomic_write_unit_min,
+ limits->atomic_write_hw_boundary);
+		if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT))
+ return false;
+ }
+
+ return true;
+}
+
#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
#endif /* _LINUX_BLKDEV_H */