[03/15] block: bio-integrity: modify bio_integrity_add_page()

Message ID 20230503100639epcms2p5069df7346cfce6db9718b9adf7cc3718@epcms2p5 (mailing list archive)
State New, archived
Series: Change the integrity configuration method in block

Commit Message

Jinyoung Choi May 3, 2023, 10:06 a.m. UTC
To satisfy the hardware constraints, physically contiguous pages should be
merged into a single bio_vec.

Previously, a separate bio_vec was created for each page passed in.
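
A minimal userspace sketch of the coalescing idea, for illustration only
(struct vec, try_merge and the addresses below are made-up names, not
kernel APIs):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct vec {
	unsigned long phys;	/* start physical address of the segment */
	unsigned int len;	/* segment length in bytes */
};

/*
 * Extend the previous segment instead of starting a new one when the
 * incoming page is physically contiguous with it.
 */
static bool try_merge(struct vec *prev, unsigned long phys, unsigned int len)
{
	if (prev->phys + prev->len == phys) {
		prev->len += len;
		return true;
	}
	return false;
}

int main(void)
{
	struct vec v = { .phys = 0x10000, .len = PAGE_SIZE };

	/* A physically adjacent page folds into the existing segment ... */
	printf("merge adjacent: %d\n", try_merge(&v, 0x11000, PAGE_SIZE));
	/* ... while a non-adjacent page would need a new bio_vec. */
	printf("merge distant:  %d\n", try_merge(&v, 0x20000, PAGE_SIZE));
	printf("segment length: %u\n", v.len);
	return 0;
}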

Cc: Christoph Hellwig <hch@lst.de>
Cc: Martin K. Petersen <martin.petersen@oracle.com>

Fixes: 783b94bd9250 ("nvme-pci: do not build a scatterlist to map metadata")
Signed-off-by: Jinyoung Choi <j-young.choi@samsung.com>
---
 block/bio-integrity.c | 41 +++++++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 14 deletions(-)

Patch

diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 06b6a2c178d2..74cf9933c285 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -176,25 +176,45 @@  static bool bip_try_merge_hw_seg(struct request_queue *q,
  * @len:	number of bytes of integrity metadata in page
  * @offset:	start offset within page
  *
- * Description: Attach a page containing integrity metadata to bio.
+ * Add a page containing integrity metadata to a bio while respecting
+ * the hardware max_sectors, max_segment and gap limitations.
  */
 int bio_integrity_add_page(struct bio *bio, struct page *page,
 			   unsigned int len, unsigned int offset)
 {
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 
-	if (bip->bip_vcnt >= bip->bip_max_vcnt) {
+	if (((bip->bip_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
+		return 0;
+
+	if (bip->bip_vcnt > 0) {
+		struct bio_vec *bv;
+		bool same_page = false;
+
+		if (bip_try_merge_hw_seg(q, bip, page, len, offset, &same_page))
+			return len;
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this segment
+		 * would create a gap, disallow it.
+		 */
+		bv = &bip->bip_vec[bip->bip_vcnt - 1];
+		if (bvec_gap_to_prev(&q->limits, bv, offset))
+			return 0;
+	}
+
+	if (bip_full(bip, len)) {
 		printk(KERN_ERR "%s: bip_vec full\n", __func__);
 		return 0;
 	}
 
-	if (bip->bip_vcnt &&
-	    bvec_gap_to_prev(&bdev_get_queue(bio->bi_bdev)->limits,
-			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
+	if (bip->bip_vcnt >= queue_max_integrity_segments(q))
 		return 0;
 
 	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
 	bip->bip_vcnt++;
+	bip->bip_iter.bi_size += len;
 
 	return len;
 }
@@ -307,7 +327,6 @@  bool bio_integrity_prep(struct bio *bio)
 	}
 
 	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
-	bip->bip_iter.bi_size = len;
 	bip_set_seed(bip, bio->bi_iter.bi_sector);
 
 	if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)
@@ -316,7 +335,6 @@  bool bio_integrity_prep(struct bio *bio)
 	/* Map it */
 	offset = offset_in_page(buf);
 	for (i = 0 ; i < nr_pages ; i++) {
-		int ret;
 		bytes = PAGE_SIZE - offset;
 
 		if (len <= 0)
@@ -325,18 +343,13 @@  bool bio_integrity_prep(struct bio *bio)
 		if (bytes > len)
 			bytes = len;
 
-		ret = bio_integrity_add_page(bio, virt_to_page(buf),
-					     bytes, offset);
-
-		if (ret == 0) {
+		if (bio_integrity_add_page(bio, virt_to_page(buf),
+					   bytes, offset) < bytes) {
 			printk(KERN_ERR "could not attach integrity payload\n");
 			status = BLK_STS_RESOURCE;
 			goto err_end_io;
 		}
 
-		if (ret < bytes)
-			break;
-
 		buf += bytes;
 		len -= bytes;
 		offset = 0;
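
With this change bio_integrity_add_page() returns the number of bytes it
actually accepted (len on success, 0 when a queue limit or gap rule
prevents adding the page), so callers treat a short return as failure, as
bio_integrity_prep() does above. A rough userspace sketch of that
caller-side contract, with purely illustrative names and limits:

#include <stdio.h>

/* Model of the limit check: refuse the page once the size cap is hit. */
static unsigned int add_page(unsigned int have, unsigned int max,
			     unsigned int bytes)
{
	if (have + bytes > max)
		return 0;
	return bytes;
}

int main(void)
{
	unsigned int have = 0, max = 8192, bytes = 4096;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int ret = add_page(have, max, bytes);

		/* A short return means the payload could not be attached. */
		if (ret < bytes) {
			fprintf(stderr, "could not attach payload\n");
			return 1;
		}
		have += ret;
	}
	printf("attached %u bytes\n", have);
	return 0;
}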