diff mbox

WARNING: CPU: 2 PID: 207 at drivers/nvme/host/core.c:527 nvme_setup_cmd+0x3d3

Message ID 20180201045645.GF27735@localhost.localdomain (mailing list archive)
State New, archived
Headers show

Commit Message

Keith Busch Feb. 1, 2018, 4:56 a.m. UTC
On Wed, Jan 31, 2018 at 08:07:41PM -0700, Jens Axboe wrote:
>  	if (total_phys_segments > queue_max_segments(q))
> -		return 0;
> +			return 0;

This perhaps unintended change happens to point out another problem:
queue_max_segments is the wrong limit for discards, which require
queue_max_discard_segments.

It might be easier to merge discard requests special, like how merging
a discard bio is handled (untested).

---
--

Comments

Jens Axboe Feb. 1, 2018, 3:26 p.m. UTC | #1
On 1/31/18 9:56 PM, Keith Busch wrote:
> On Wed, Jan 31, 2018 at 08:07:41PM -0700, Jens Axboe wrote:
>>  	if (total_phys_segments > queue_max_segments(q))
>> -		return 0;
>> +			return 0;
> 
> This perhaps unintended change happens to point out another problem:
> queue_max_segments is the wrong limit for discards, which require
> queue_max_discard_segments.
> 
> It might be easier to merge discard requests special, like how merging
> a discard bio is handled (untested).

Yeah agreed, we should just split it up completely instead of
special casing it in the read/write path.


> diff --git a/block/blk-merge.c b/block/blk-merge.c
> index 8452fc7164cc..01671e1373ff 100644
> --- a/block/blk-merge.c
> +++ b/block/blk-merge.c
> @@ -550,6 +550,28 @@ static bool req_no_special_merge(struct request *req)
>  	return !q->mq_ops && req->special;
>  }
>  
> +static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
> +		struct request *next)
> +{
> +	unsigned short segments = blk_rq_nr_discard_segments(req);
> +
> +	if (segments >= queue_max_discard_segments(q))
> +		goto no_merge;
> +	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
> +	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
> +		goto no_merge;
> +
> +	req->biotail->bi_next = next->bio;
> +	req->biotail = next->biotail;
> +	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
> +	next->bio = NULL;
> +	return true;
> +
> +no_merge:
> +	req_set_nomerge(q, req);
> +	return false;
> +}
> +
>  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
>  				struct request *next)
>  {
> @@ -679,6 +701,15 @@ static struct request *attempt_merge(struct request_queue *q,
>  	if (req->write_hint != next->write_hint)
>  		return NULL;
>  
> +	/*
> +	 * Discards are ... special.
> +	 */
> +	if (req_op(req) == REQ_OP_DISCARD) {
> +		if (req_attempt_discard_merge(q, req, next))
> +			return next;
> +		return NULL;
> +	}
> +
>  	/*
>  	 * If we are allowed to merge, then append bio list
>  	 * from next to rq and release next. merge_requests_fn

This looks fine to me, the bio-to-request merge path already looks correct.
Care to send a properly formatted patch?
Keith Busch Feb. 1, 2018, 6:01 p.m. UTC | #2
On Thu, Feb 01, 2018 at 08:26:11AM -0700, Jens Axboe wrote:
> On 1/31/18 9:56 PM, Keith Busch wrote:
> 
> > diff --git a/block/blk-merge.c b/block/blk-merge.c
> > index 8452fc7164cc..01671e1373ff 100644
> > --- a/block/blk-merge.c
> > +++ b/block/blk-merge.c
> > @@ -550,6 +550,28 @@ static bool req_no_special_merge(struct request *req)
> >  	return !q->mq_ops && req->special;
> >  }
> >  
> > +static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
> > +		struct request *next)
> > +{
> > +	unsigned short segments = blk_rq_nr_discard_segments(req);
> > +
> > +	if (segments >= queue_max_discard_segments(q))
> > +		goto no_merge;
> > +	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
> > +	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
> > +		goto no_merge;
> > +
> > +	req->biotail->bi_next = next->bio;
> > +	req->biotail = next->biotail;
> > +	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
> > +	next->bio = NULL;
> > +	return true;
> > +
> > +no_merge:
> > +	req_set_nomerge(q, req);
> > +	return false;
> > +}
> > +
> >  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
> >  				struct request *next)
> >  {
> > @@ -679,6 +701,15 @@ static struct request *attempt_merge(struct request_queue *q,
> >  	if (req->write_hint != next->write_hint)
> >  		return NULL;
> >  
> > +	/*
> > +	 * Discards are ... special.
> > +	 */
> > +	if (req_op(req) == REQ_OP_DISCARD) {
> > +		if (req_attempt_discard_merge(q, req, next))
> > +			return next;
> > +		return NULL;
> > +	}
> > +
> >  	/*
> >  	 * If we are allowed to merge, then append bio list
> >  	 * from next to rq and release next. merge_requests_fn
> 
> This looks fine to me, the bio-to-request merge path already looks correct.
> Care to send a properly formatted patch?

Sending the patch now. It's a little different from the above so that it
doesn't need to duplicate some of the merging accounting.

Full disclosure, I have not found a way to trigger this merge. I'm just
running 'fio' with trim, randtrim, and trimwrite on a device with
mq-deadline for the past hour, and haven't seen a merge happen yet.
diff mbox

Patch

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8452fc7164cc..01671e1373ff 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -550,6 +550,28 @@  static bool req_no_special_merge(struct request *req)
 	return !q->mq_ops && req->special;
 }
 
+static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
+		struct request *next)
+{
+	unsigned short segments = blk_rq_nr_discard_segments(req);
+
+	if (segments >= queue_max_discard_segments(q))
+		goto no_merge;
+	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
+	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
+		goto no_merge;
+
+	req->biotail->bi_next = next->bio;
+	req->biotail = next->biotail;
+	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
+	next->bio = NULL;
+	return true;
+
+no_merge:
+	req_set_nomerge(q, req);
+	return false;
+}
+
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
@@ -679,6 +701,15 @@  static struct request *attempt_merge(struct request_queue *q,
 	if (req->write_hint != next->write_hint)
 		return NULL;
 
+	/*
+	 * Discards are ... special.
+	 */
+	if (req_op(req) == REQ_OP_DISCARD) {
+		if (req_attempt_discard_merge(q, req, next))
+			return next;
+		return NULL;
+	}
+
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn