
WARNING: CPU: 2 PID: 207 at drivers/nvme/host/core.c:527 nvme_setup_cmd+0x3d3

Message ID 7ec361a3-8900-d259-6842-e0b0b14a253a@kernel.dk (mailing list archive)
State New, archived

Commit Message

Jens Axboe Feb. 1, 2018, 5:58 p.m. UTC
On 2/1/18 8:26 AM, Jens Axboe wrote:
> On 1/31/18 9:56 PM, Keith Busch wrote:
>> On Wed, Jan 31, 2018 at 08:07:41PM -0700, Jens Axboe wrote:
>>>  	if (total_phys_segments > queue_max_segments(q))
>>> -		return 0;
>>> +			return 0;
>>
>> This perhaps unintended change happens to point out another problem:
>> queue_max_segments is the wrong limit for discards, which require
>> queue_max_discard_segments.
>>
>> It might be easier to merge discard requests specially, like how merging
>> a discard bio is handled (untested).
> 
> Yeah agreed, we should just split it up completely instead of
> special casing it in the read/write path.
> 
> 
>> diff --git a/block/blk-merge.c b/block/blk-merge.c
>> index 8452fc7164cc..01671e1373ff 100644
>> --- a/block/blk-merge.c
>> +++ b/block/blk-merge.c
>> @@ -550,6 +550,28 @@ static bool req_no_special_merge(struct request *req)
>>  	return !q->mq_ops && req->special;
>>  }
>>  
>> +static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
>> +		struct request *next)
>> +{
>> +	unsigned short segments = blk_rq_nr_discard_segments(req);
>> +
>> +	if (segments >= queue_max_discard_segments(q))
>> +		goto no_merge;
>> +	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
>> +	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
>> +		goto no_merge;
>> +
>> +	req->biotail->bi_next = next->bio;
>> +	req->biotail = next->biotail;
>> +	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
>> +	next->bio = NULL;
>> +	return true;
>> +
>> +no_merge:
>> +	req_set_nomerge(q, req);
>> +	return false;
>> +}
>> +
>>  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
>>  				struct request *next)
>>  {
>> @@ -679,6 +701,15 @@ static struct request *attempt_merge(struct request_queue *q,
>>  	if (req->write_hint != next->write_hint)
>>  		return NULL;
>>  
>> +	/*
>> +	 * Discards are ... special.
>> +	 */
>> +	if (req_op(req) == REQ_OP_DISCARD) {
>> +		if (req_attempt_discard_merge(q, req, next))
>> +			return next;
>> +		return NULL;
>> +	}
>> +
>>  	/*
>>  	 * If we are allowed to merge, then append bio list
>>  	 * from next to rq and release next. merge_requests_fn
> 
> This looks fine to me; the bio-to-request merge path already looks correct.
> Care to send a properly formatted patch?

I was able to reproduce on a test box, pretty trivially in fact:

# echo mq-deadline > /sys/block/nvme2n1/queue/scheduler
# mkfs.ext4 /dev/nvme2n1
# mount /dev/nvme2n1 /data -o discard
# dd if=/dev/zero of=/data/10g bs=1M count=10k
# sync
# rm /data/10g
# sync <- triggered

Your patch still doesn't work, but mainly because we init the segments
to 0 when setting up a discard. The below works for me, and cleans up
the merge path a bit, since your patch was missing various adjustments
on both the merged and freed request.
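
For reference, the driver-side accounting that fires the warning in the
subject line looks roughly like the sketch below: a simplified rendition of
nvme_setup_discard() in drivers/nvme/host/core.c from that era, not the
verbatim code. The driver allocates one DSM range per discard segment the
block layer accounted, walks the request's bio chain to fill them, and
warns if the two counts disagree:

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/* One DSM range per discard segment the block layer accounted. */
	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_STS_RESOURCE;

	/* Fill one range per bio chained to the request. */
	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		range[n].cattr = cpu_to_le32(0);
		range[n].nlb = cpu_to_le32(nlb);
		range[n].slba = cpu_to_le64(slba);
		n++;
	}

	/*
	 * The WARN_ON_ONCE() behind the subject line: the request claimed
	 * 'segments' discard ranges but actually carries 'n' bios.
	 */
	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_STS_IOERR;
	}

	/* ... point the DSM command at 'range'; freed on request completion ... */
	return BLK_STS_OK;
}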

Comments

Keith Busch Feb. 1, 2018, 6:12 p.m. UTC | #1
On Thu, Feb 01, 2018 at 10:58:23AM -0700, Jens Axboe wrote:
> I was able to reproduce on a test box, pretty trivially in fact:
> 
> # echo mq-deadline > /sys/block/nvme2n1/queue/scheduler
> # mkfs.ext4 /dev/nvme2n1
> # mount /dev/nvme2n1 /data -o discard
> # dd if=/dev/zero of=/data/10g bs=1M count=10k
> # sync
> # rm /data/10g
> # sync <- triggered

Nice! Thanks, this recipe works for me too.
 
> Your patch still doesn't work, but mainly because we init the segments
> to 0 when setting up a discard. The below works for me, and cleans up
> the merge path a bit, since your patch was missing various adjustments
> on both the merged and freed request.

Yep, your update is very similar to my real patch, but I'm missing one
thing (elv_merge_requests). If you're already testing successfully with
your patch, I don't mind if you want to move forward with yours.

 
> diff --git a/block/blk-core.c b/block/blk-core.c
> index a2005a485335..e4561c95fc23 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -3282,6 +3282,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
>  {
>  	if (bio_has_data(bio))
>  		rq->nr_phys_segments = bio_phys_segments(q, bio);
> +	else if (bio_op(bio) == REQ_OP_DISCARD)
> +		rq->nr_phys_segments = 1;
>  
>  	rq->__data_len = bio->bi_iter.bi_size;
>  	rq->bio = rq->biotail = bio;
> diff --git a/block/blk-merge.c b/block/blk-merge.c
> index 8452fc7164cc..782940c65d8a 100644
> --- a/block/blk-merge.c
> +++ b/block/blk-merge.c
> @@ -550,6 +550,24 @@ static bool req_no_special_merge(struct request *req)
>  	return !q->mq_ops && req->special;
>  }
>  
> +static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
> +		struct request *next)
> +{
> +	unsigned short segments = blk_rq_nr_discard_segments(req);
> +
> +	if (segments >= queue_max_discard_segments(q))
> +		goto no_merge;
> +	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
> +	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
> +		goto no_merge;
> +
> +	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
> +	return true;
> +no_merge:
> +	req_set_nomerge(q, req);
> +	return false;
> +}
> +
>  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
>  				struct request *next)
>  {
> @@ -683,9 +701,13 @@ static struct request *attempt_merge(struct request_queue *q,
>  	 * If we are allowed to merge, then append bio list
>  	 * from next to rq and release next. merge_requests_fn
>  	 * will have updated segment counts, update sector
> -	 * counts here.
> +	 * counts here. Handle DISCARDs separately, as they
> +	 * have separate settings.
>  	 */
> -	if (!ll_merge_requests_fn(q, req, next))
> +	if (req_op(req) == REQ_OP_DISCARD) {
> +		if (!req_attempt_discard_merge(q, req, next))
> +			return NULL;
> +	} else if (!ll_merge_requests_fn(q, req, next))
>  		return NULL;
>  
>  	/*
> @@ -715,7 +737,8 @@ static struct request *attempt_merge(struct request_queue *q,
>  
>  	req->__data_len += blk_rq_bytes(next);
>  
> -	elv_merge_requests(q, req, next);
> +	if (req_op(req) != REQ_OP_DISCARD)
> +		elv_merge_requests(q, req, next);
>  
>  	/*
>  	 * 'next' is going away, so update stats accordingly
> 
> -- 
> Jens Axboe
>

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index a2005a485335..e4561c95fc23 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3282,6 +3282,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 {
 	if (bio_has_data(bio))
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
+	else if (bio_op(bio) == REQ_OP_DISCARD)
+		rq->nr_phys_segments = 1;
 
 	rq->__data_len = bio->bi_iter.bi_size;
 	rq->bio = rq->biotail = bio;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8452fc7164cc..782940c65d8a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -550,6 +550,24 @@ static bool req_no_special_merge(struct request *req)
 	return !q->mq_ops && req->special;
 }
 
+static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
+		struct request *next)
+{
+	unsigned short segments = blk_rq_nr_discard_segments(req);
+
+	if (segments >= queue_max_discard_segments(q))
+		goto no_merge;
+	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
+	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
+		goto no_merge;
+
+	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
+	return true;
+no_merge:
+	req_set_nomerge(q, req);
+	return false;
+}
+
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
@@ -683,9 +701,13 @@ static struct request *attempt_merge(struct request_queue *q,
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
 	 * will have updated segment counts, update sector
-	 * counts here.
+	 * counts here. Handle DISCARDs separately, as they
+	 * have separate settings.
 	 */
-	if (!ll_merge_requests_fn(q, req, next))
+	if (req_op(req) == REQ_OP_DISCARD) {
+		if (!req_attempt_discard_merge(q, req, next))
+			return NULL;
+	} else if (!ll_merge_requests_fn(q, req, next))
 		return NULL;
 
 	/*
@@ -715,7 +737,8 @@ static struct request *attempt_merge(struct request_queue *q,
 
 	req->__data_len += blk_rq_bytes(next);
 
-	elv_merge_requests(q, req, next);
+	if (req_op(req) != REQ_OP_DISCARD)
+		elv_merge_requests(q, req, next);
 
 	/*
 	 * 'next' is going away, so update stats accordingly
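
For reference, why the one-line blk_rq_bio_prep() change matters:
blk_rq_nr_discard_segments() derives the discard range count straight from
rq->nr_phys_segments, clamped to at least 1. The helper looks roughly like
this (include/linux/blkdev.h of that era; the exact form may differ between
kernel versions):

static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	/* A discard request always covers at least one range. */
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

With discards initialized to one segment, and req_attempt_discard_merge()
summing the two requests' counts on every merge, nr_phys_segments stays
equal to the number of bios chained to the request, which is exactly what
nvme_setup_discard() cross-checks before building its DSM range list.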