diff mbox series

[07/10] nvmet: switch to using blk_next_discard_bio directly

Message ID 20240307151157.466013-8-hch@lst.de (mailing list archive)
State New
Headers show
Series [01/10] block: remove the discard_granularity check in __blkdev_issue_discard | expand

Commit Message

Christoph Hellwig March 7, 2024, 3:11 p.m. UTC
This fixes fatal signals getting in the way and corrupting the bio
chain, and removes the need to handle synchronous errors.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/target/io-cmd-bdev.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)
diff mbox series

Patch

diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index f11400a908f269..c1345aaf837d93 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -363,17 +363,13 @@  u16 nvmet_bdev_flush(struct nvmet_req *req)
 static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
 		struct nvme_dsm_range *range, struct bio **bio)
 {
-	struct nvmet_ns *ns = req->ns;
-	int ret;
+	sector_t sector = nvmet_lba_to_sect(req->ns, range->slba);
+	sector_t nr_sects = le32_to_cpu(range->nlb) <<
+		(req->ns->blksize_shift - SECTOR_SHIFT);
 
-	ret = __blkdev_issue_discard(ns->bdev,
-			nvmet_lba_to_sect(ns, range->slba),
-			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-			GFP_KERNEL, bio);
-	if (ret && ret != -EOPNOTSUPP) {
-		req->error_slba = le64_to_cpu(range->slba);
-		return errno_to_nvme_status(req, ret);
-	}
+	while (blk_next_discard_bio(req->ns->bdev, bio, &sector, &nr_sects,
+			GFP_KERNEL))
+		;
 	return NVME_SC_SUCCESS;
 }