diff mbox series

[2/5] blk-mq: rename hctx_lock & hctx_unlock

Message ID 20211119021849.2259254-3-ming.lei@redhat.com (mailing list archive)
State New, archived
Headers show
Series blk-mq: quiesce improvement | expand

Commit Message

Ming Lei Nov. 19, 2021, 2:18 a.m. UTC
We have moved srcu from 'struct blk_mq_hw_ctx' into 'struct request_queue',
so both hctx_lock() and hctx_unlock() now operate at request queue level;
rename them to queue_lock() and queue_unlock().

It could also be used to support Jens's ->queue_rqs(), as suggested
by Keith.

It could also be extended for driver use in the future.

Cc: Keith Busch <kbusch@kernel.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq.c | 40 +++++++++++++++++++++++-----------------
 1 file changed, 23 insertions(+), 17 deletions(-)

Comments

Sagi Grimberg Nov. 22, 2021, 7:53 a.m. UTC | #1
> -static inline void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
> -	__releases(hctx->srcu)
> +static inline void queue_unlock(struct request_queue *q, bool blocking,
> +		int srcu_idx)
> +	__releases(q->srcu)
>   {
> -	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
> +	if (!blocking)
>   		rcu_read_unlock();
>   	else
> -		srcu_read_unlock(hctx->queue->srcu, srcu_idx);
> +		srcu_read_unlock(q->srcu, srcu_idx);

Maybe instead of passing blocking bool just look at srcu_idx?

	if (srcu_idx < 0)
		rcu_read_unlock();
	else
		srcu_read_unlock(q->srcu, srcu_idx);

Or look if the queue has srcu allocated?

	if (!q->srcu)
		rcu_read_unlock();
	else
		srcu_read_unlock(q->srcu, srcu_idx);

>   }
>   
> -static inline void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
> +static inline void queue_lock(struct request_queue *q, bool blocking,
> +		int *srcu_idx)
>   	__acquires(hctx->srcu)
>   {
> -	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
> +	if (!blocking) {
>   		/* shut up gcc false positive */
>   		*srcu_idx = 0;
>   		rcu_read_lock();
>   	} else
> -		*srcu_idx = srcu_read_lock(hctx->queue->srcu);
> +		*srcu_idx = srcu_read_lock(q->srcu);

Same here:
	
	if (!q->srcu)
		rcu_read_lock();
	else
		srcu_idx = srcu_read_lock(q->srcu);
Christoph Hellwig Nov. 22, 2021, 9 a.m. UTC | #2
On Fri, Nov 19, 2021 at 10:18:46AM +0800, Ming Lei wrote:
> +static inline void queue_unlock(struct request_queue *q, bool blocking,
> +		int srcu_idx)

I don't think this is a good name, as it can be easily confused with
q->queue_lock.

> +	__releases(q->srcu)
>  {
> -	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
> +	if (!blocking)
>  		rcu_read_unlock();
>  	else
> -		srcu_read_unlock(hctx->queue->srcu, srcu_idx);
> +		srcu_read_unlock(q->srcu, srcu_idx);
>  }

I think you want to make BLK_MQ_F_BLOCKING accessible from the
request_queue instead of passing the extra argument as well.
Ming Lei Nov. 22, 2021, 1:20 p.m. UTC | #3
On Mon, Nov 22, 2021 at 09:53:53AM +0200, Sagi Grimberg wrote:
> 
> > -static inline void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
> > -	__releases(hctx->srcu)
> > +static inline void queue_unlock(struct request_queue *q, bool blocking,
> > +		int srcu_idx)
> > +	__releases(q->srcu)
> >   {
> > -	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
> > +	if (!blocking)
> >   		rcu_read_unlock();
> >   	else
> > -		srcu_read_unlock(hctx->queue->srcu, srcu_idx);
> > +		srcu_read_unlock(q->srcu, srcu_idx);
> 
> Maybe instead of passing blocking bool just look at srcu_idx?
> 
> 	if (srcu_idx < 0)
> 		rcu_read_unlock();
> 	else
> 		srcu_read_unlock(q->srcu, srcu_idx);

This way requires initializing srcu_idx in each caller.

> 
> Or look if the queue has srcu allocated?
> 
> 	if (!q->srcu)
> 		rcu_read_unlock();
> 	else
> 		srcu_read_unlock(q->srcu, srcu_idx);

This way is worse since reading q->srcu may involve an extra cacheline fetch.

hctx->flags is always hot, so it is basically zero cost to check it.


Thanks,
Ming
Sagi Grimberg Nov. 22, 2021, 1:50 p.m. UTC | #4
On 11/22/21 3:20 PM, Ming Lei wrote:
> On Mon, Nov 22, 2021 at 09:53:53AM +0200, Sagi Grimberg wrote:
>>
>>> -static inline void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
>>> -	__releases(hctx->srcu)
>>> +static inline void queue_unlock(struct request_queue *q, bool blocking,
>>> +		int srcu_idx)
>>> +	__releases(q->srcu)
>>>    {
>>> -	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
>>> +	if (!blocking)
>>>    		rcu_read_unlock();
>>>    	else
>>> -		srcu_read_unlock(hctx->queue->srcu, srcu_idx);
>>> +		srcu_read_unlock(q->srcu, srcu_idx);
>>
>> Maybe instead of passing blocking bool just look at srcu_idx?
>>
>> 	if (srcu_idx < 0)
>> 		rcu_read_unlock();
>> 	else
>> 		srcu_read_unlock(q->srcu, srcu_idx);
> 
> This way needs to initialize srcu_idx in each callers.

Then look at q->has_srcu that Bart suggested?

> 
>>
>> Or look if the queue has srcu allocated?
>>
>> 	if (!q->srcu)
>> 		rcu_read_unlock();
>> 	else
>> 		srcu_read_unlock(q->srcu, srcu_idx);
> 
> This way is worse since q->srcu may involve one new cacheline fetch.
> 
> hctx->flags is always hot, so it is basically zero cost to check it.

Yea, but the interface is awkward that the caller tells the
routine how it should lock/unlock...
Ming Lei Nov. 23, 2021, 12:08 a.m. UTC | #5
On Mon, Nov 22, 2021 at 03:50:14PM +0200, Sagi Grimberg wrote:
> 
> 
> On 11/22/21 3:20 PM, Ming Lei wrote:
> > On Mon, Nov 22, 2021 at 09:53:53AM +0200, Sagi Grimberg wrote:
> > > 
> > > > -static inline void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
> > > > -	__releases(hctx->srcu)
> > > > +static inline void queue_unlock(struct request_queue *q, bool blocking,
> > > > +		int srcu_idx)
> > > > +	__releases(q->srcu)
> > > >    {
> > > > -	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
> > > > +	if (!blocking)
> > > >    		rcu_read_unlock();
> > > >    	else
> > > > -		srcu_read_unlock(hctx->queue->srcu, srcu_idx);
> > > > +		srcu_read_unlock(q->srcu, srcu_idx);
> > > 
> > > Maybe instead of passing blocking bool just look at srcu_idx?
> > > 
> > > 	if (srcu_idx < 0)
> > > 		rcu_read_unlock();
> > > 	else
> > > 		srcu_read_unlock(q->srcu, srcu_idx);
> > 
> > This way needs to initialize srcu_idx in each callers.
> 
> Then look at q->has_srcu that Bart suggested?

Bart just suggested to rename q->alloc_srcu as q->has_srcu.

> 
> > 
> > > 
> > > Or look if the queue has srcu allocated?
> > > 
> > > 	if (!q->srcu)
> > > 		rcu_read_unlock();
> > > 	else
> > > 		srcu_read_unlock(q->srcu, srcu_idx);
> > 
> > This way is worse since q->srcu may involve one new cacheline fetch.
> > 
> > hctx->flags is always hot, so it is basically zero cost to check it.
> 
> Yea, but the interface is awkward that the caller tells the
> routine how it should lock/unlock...

If the two helpers are blk-mq internal only, I think it is fine to keep
them this way, with a comment.

If drivers need the two helpers exported, they would mostly be used in the
slow path, and then it is fine to refine the interface type.


Thanks,
Ming
Sagi Grimberg Nov. 23, 2021, 8:54 a.m. UTC | #6
>> Then look at q->has_srcu that Bart suggested?
> 
> Bart just suggested to rename q->alloc_srcu as q->has_srcu.

Yea, is there a problem using that instead of having callers
pass a flag?
diff mbox series

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9728a571b009..ba0d0e411b65 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1076,24 +1076,26 @@  void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static inline void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
-	__releases(hctx->srcu)
+static inline void queue_unlock(struct request_queue *q, bool blocking,
+		int srcu_idx)
+	__releases(q->srcu)
 {
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+	if (!blocking)
 		rcu_read_unlock();
 	else
-		srcu_read_unlock(hctx->queue->srcu, srcu_idx);
+		srcu_read_unlock(q->srcu, srcu_idx);
 }
 
-static inline void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
+static inline void queue_lock(struct request_queue *q, bool blocking,
+		int *srcu_idx)
 	__acquires(hctx->srcu)
 {
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+	if (!blocking) {
 		/* shut up gcc false positive */
 		*srcu_idx = 0;
 		rcu_read_lock();
 	} else
-		*srcu_idx = srcu_read_lock(hctx->queue->srcu);
+		*srcu_idx = srcu_read_lock(q->srcu);
 }
 
 /**
@@ -1958,6 +1960,7 @@  bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	int srcu_idx;
+	bool blocking = hctx->flags & BLK_MQ_F_BLOCKING;
 
 	/*
 	 * We can't run the queue inline with ints disabled. Ensure that
@@ -1965,11 +1968,11 @@  static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	 */
 	WARN_ON_ONCE(in_interrupt());
 
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+	might_sleep_if(blocking);
 
-	hctx_lock(hctx, &srcu_idx);
+	queue_lock(hctx->queue, blocking, &srcu_idx);
 	blk_mq_sched_dispatch_requests(hctx);
-	hctx_unlock(hctx, srcu_idx);
+	queue_unlock(hctx->queue, blocking, srcu_idx);
 }
 
 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
@@ -2083,6 +2086,7 @@  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
 	int srcu_idx;
 	bool need_run;
+	bool blocking = hctx->flags & BLK_MQ_F_BLOCKING;
 
 	/*
 	 * When queue is quiesced, we may be switching io scheduler, or
@@ -2092,10 +2096,10 @@  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 	 * quiesced.
 	 */
-	hctx_lock(hctx, &srcu_idx);
+	queue_lock(hctx->queue, blocking, &srcu_idx);
 	need_run = !blk_queue_quiesced(hctx->queue) &&
 		blk_mq_hctx_has_pending(hctx);
-	hctx_unlock(hctx, srcu_idx);
+	queue_unlock(hctx->queue, blocking, srcu_idx);
 
 	if (need_run)
 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -2500,10 +2504,11 @@  static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
 	blk_status_t ret;
 	int srcu_idx;
+	bool blocking = hctx->flags & BLK_MQ_F_BLOCKING;
 
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+	might_sleep_if(blocking);
 
-	hctx_lock(hctx, &srcu_idx);
+	queue_lock(hctx->queue, blocking, &srcu_idx);
 
 	ret = __blk_mq_try_issue_directly(hctx, rq, false, true);
 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
@@ -2511,7 +2516,7 @@  static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	else if (ret != BLK_STS_OK)
 		blk_mq_end_request(rq, ret);
 
-	hctx_unlock(hctx, srcu_idx);
+	queue_unlock(hctx->queue, blocking, srcu_idx);
 }
 
 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
@@ -2519,10 +2524,11 @@  static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	blk_status_t ret;
 	int srcu_idx;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+	bool blocking = hctx->flags & BLK_MQ_F_BLOCKING;
 
-	hctx_lock(hctx, &srcu_idx);
+	queue_lock(hctx->queue, blocking, &srcu_idx);
 	ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
-	hctx_unlock(hctx, srcu_idx);
+	queue_unlock(hctx->queue, blocking, srcu_idx);
 
 	return ret;
 }