@@ -1311,6 +1311,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
unsigned int request_count = 0;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
+ struct bio *orig = bio;
blk_qc_t cookie;
blk_queue_bounce(q, &bio);
@@ -1389,7 +1390,7 @@ run_queue:
}
blk_mq_put_ctx(data.ctx);
done:
- return cookie;
+ return bio == orig ? cookie : BLK_QC_T_NONE;
}
/*
@@ -1404,6 +1405,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
unsigned int request_count = 0;
struct blk_map_ctx data;
struct request *rq;
+ struct bio *orig = bio;
blk_qc_t cookie;
blk_queue_bounce(q, &bio);
@@ -1467,7 +1469,7 @@ run_queue:
}
blk_mq_put_ctx(data.ctx);
- return cookie;
+ return bio == orig ? cookie : BLK_QC_T_NONE;
}
/*
The only user of polling requires its original request be completed in its
entirety before continuing execution. If the bio needs to be split and
chained for any reason, the direct IO path would have waited for just that
split portion to complete, leading to potential data corruption if the
remaining transfer has not yet completed. This patch has blk-mq return an
invalid cookie if a bio requires splitting so that polling does not occur.

Signed-off-by: Keith Busch <keith.busch@intel.com>
---
 block/blk-mq.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
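For context on why returning BLK_QC_T_NONE is enough: the patch detects a split by saving the incoming bio pointer in orig and checking, after blk_queue_bounce()/blk_queue_split() may have substituted a new bio, whether the pointer still matches, and the polling entry point of that era refuses to spin on an invalid cookie. Below is a minimal sketch of the caller-side behaviour this relies on, assuming the 4.4-era blk_poll()/IOCB_HIPRI interfaces; dio_wait_sketch and the completion-based wait are illustrative stand-ins, not the in-tree fs/direct-io.c wait loop.

```c
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/fs.h>

/*
 * Sketch only -- not the in-tree fs/direct-io.c code.  blk_poll()
 * returns false for an invalid cookie such as BLK_QC_T_NONE, so when
 * blk-mq reports an invalid cookie for a split bio the caller falls
 * back to the ordinary sleeping wait, which cannot finish early while
 * a chained remainder of the original bio is still in flight.
 */
static void dio_wait_sketch(struct kiocb *iocb, struct block_device *bdev,
			    blk_qc_t cookie, struct completion *all_done)
{
	struct request_queue *q = bdev_get_queue(bdev);

	while (!completion_done(all_done)) {
		/* Only spin when polling was requested and the cookie is usable. */
		if (!(iocb->ki_flags & IOCB_HIPRI) || !blk_poll(q, cookie))
			wait_for_completion_io(all_done);
	}
}
```

Returning BLK_QC_T_NONE rather than the cookie of the front fragment keeps the change local to blk-mq: the caller simply sleeps instead of polling, and never treats a partial completion as the whole transfer.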