
[v2,02/16] block: Limit atomic writes according to bio and queue limits

Message ID 20231212110844.19698-3-john.g.garry@oracle.com (mailing list archive)
State Not Applicable
Series block atomic writes

Commit Message

John Garry Dec. 12, 2023, 11:08 a.m. UTC
We rely on the block layer always being able to send a bio of size
atomic_write_unit_max without needing to split it due to the request
queue or other bio limits.

On the relevant submission paths for atomic writes, a bio may contain at
most min(BIO_MAX_VECS, limits->max_segments) vectors, and each vector
holds at least PAGE_SIZE of data, apart from the first vector, which may
hold less when the buffer start address is not PAGE-aligned.
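As a worked example (the numbers here are illustrative: 4 KiB pages, so
PAGE_SECTORS = 8, with BIO_MAX_VECS = 256 as the lower of the two limits):

	max_segments = min(BIO_MAX_VECS, limits->max_segments) = 256
	guaranteed   = rounddown_pow_of_two((256 - 1) * 8)
	             = rounddown_pow_of_two(2040)
	             = 1024 sectors (512 KiB)

so any atomic write unit larger than 1024 sectors would be clamped on such
a queue.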

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 block/blk-settings.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

Patch

diff --git a/block/blk-settings.c b/block/blk-settings.c
index d151be394c98..dd699580fccd 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -213,6 +213,26 @@  void blk_queue_atomic_write_boundary_bytes(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_queue_atomic_write_boundary_bytes);
 
+
+/*
+ * Returns the max guaranteed number of sectors which we can fit in a bio. For
+ * the convenience of users, the return value is rounded down to a power-of-2.
+ *
+ * We always assume that we can fit at least PAGE_SIZE in a segment.
+ */
+static unsigned int blk_queue_max_guaranteed_bio_sectors(
+					struct queue_limits *limits)
+{
+	unsigned int max_segments = min_t(unsigned int, BIO_MAX_VECS,
+					  limits->max_segments);
+
+	if (max_segments < 2)
+		return 0;
+
+	/* subtract 1 to allow for a PAGE-misaligned IOV start address */
+	return rounddown_pow_of_two((max_segments - 1) * PAGE_SECTORS);
+}
+
 /**
  * blk_queue_atomic_write_unit_min_sectors - smallest unit that can be written
  * atomically to the device.
@@ -223,8 +243,10 @@  void blk_queue_atomic_write_unit_min_sectors(struct request_queue *q,
 					     unsigned int sectors)
 {
 	struct queue_limits *limits = &q->limits;
+	unsigned int guaranteed_sectors =
+		blk_queue_max_guaranteed_bio_sectors(limits);
 
-	limits->atomic_write_unit_min_sectors = sectors;
+	limits->atomic_write_unit_min_sectors = min(guaranteed_sectors, sectors);
 }
 EXPORT_SYMBOL(blk_queue_atomic_write_unit_min_sectors);
 
@@ -238,8 +260,10 @@  void blk_queue_atomic_write_unit_max_sectors(struct request_queue *q,
 					     unsigned int sectors)
 {
 	struct queue_limits *limits = &q->limits;
+	unsigned int guaranteed_sectors =
+		blk_queue_max_guaranteed_bio_sectors(limits);
 
-	limits->atomic_write_unit_max_sectors = sectors;
+	limits->atomic_write_unit_max_sectors = min(guaranteed_sectors, sectors);
 }
 EXPORT_SYMBOL(blk_queue_atomic_write_unit_max_sectors);
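As a usage sketch of the clamping behaviour introduced here (the call site,
the queue variable q and the sector values below are hypothetical, not taken
from this series):

	/* hypothetical driver probe path; q is the driver's request queue */
	blk_queue_atomic_write_unit_min_sectors(q, 8);
	blk_queue_atomic_write_unit_max_sectors(q, 2048);

With this patch applied, each requested value is min()'d against
blk_queue_max_guaranteed_bio_sectors(), so a request for 2048 sectors would
be reduced whenever it exceeds what a single bio is guaranteed to fit.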