
[RESEND,v2] block-map: added error handling for bio_copy_kern()

Message ID 20220429055854epcms2p7e5045065a4732fb59b0fb1345c348a45@epcms2p7 (mailing list archive)
State New, archived

Commit Message

Jinyoung Choi April 29, 2022, 5:58 a.m. UTC
When bio_copy_kern() allocates new pages for a bio via alloc_page(),
those pages must be freed on any error path that follows.

An error from blk_rq_append_bio() is unlikely, but if it does occur,
the pages that were additionally allocated to the bio must be released.
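
To make the required cleanup order explicit, here is a minimal sketch;
free_mapped_bio() is a hypothetical helper, not part of this patch. The
point is that bio_free_pages() must run while the copied pages are still
reachable through the bio, before bio_uninit() and kfree() tear the bio
down.

/*
 * Hypothetical helper (illustration only): the cleanup order this
 * fix establishes.  Pages obtained from alloc_page() inside
 * bio_copy_kern() are only reachable through the bio's bvecs, so
 * they must be freed before the bio itself is destroyed.
 */
static void free_mapped_bio(struct bio *bio, bool copied)
{
	if (copied)
		bio_free_pages(bio);	/* pages from bio_copy_kern() */
	bio_uninit(bio);
	kfree(bio);
}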

V2:
	- replace int with bool

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jinyoung Choi <j-young.choi@samsung.com>
---
 block/blk-map.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

Patch

diff --git a/block/blk-map.c b/block/blk-map.c
index df8b066cd548..613990fa87e1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -637,6 +637,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	int reading = rq_data_dir(rq) == READ;
 	unsigned long addr = (unsigned long) kbuf;
 	struct bio *bio;
+	bool do_copy;
 	int ret;
 
 	if (len > (queue_max_hw_sectors(q) << 9))
@@ -644,8 +645,9 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
-	    blk_queue_may_bounce(q))
+	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
+		blk_queue_may_bounce(q);
+	if (do_copy)
 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 	else
 		bio = bio_map_kern(q, kbuf, len, gfp_mask);
@@ -658,6 +660,8 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 
 	ret = blk_rq_append_bio(rq, bio);
 	if (unlikely(ret)) {
+		if (do_copy)
+			bio_free_pages(bio);
 		bio_uninit(bio);
 		kfree(bio);
 	}
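
For context, a sketch of how a caller can reach the copy path; the
function and buffer below are illustrative, not from the kernel tree.
Any of the three conditions (buffer not aligned for the queue, buffer
on the stack, queue that may bounce) sets do_copy, so the bio ends up
owning pages from alloc_page() that the new error handling frees:

/*
 * Illustrative caller (hypothetical names): an on-stack buffer makes
 * object_is_on_stack() true, forcing blk_rq_map_kern() onto the
 * bio_copy_kern() path.  If blk_rq_append_bio() then fails, the
 * copied pages are now released via bio_free_pages().
 */
static int send_cmd(struct request_queue *q, struct request *rq)
{
	char buf[512];	/* on-stack buffer triggers the copy path */

	memset(buf, 0, sizeof(buf));
	return blk_rq_map_kern(q, rq, buf, sizeof(buf), GFP_NOIO);
}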