
[V2,2/2] blk-mq: make sure elevator callbacks aren't called for passthrough request

Message ID 20230515144601.52811-3-ming.lei@redhat.com (mailing list archive)
State New, archived
Series blk-mq: handle passthrough request as really passthrough

Commit Message

Ming Lei May 15, 2023, 2:46 p.m. UTC
In case of q->elevator, a passthrough request can still be marked with RQF_ELV,
so some elevator callbacks will be called for passthrough requests.

Add a helper, blk_mq_bypass_sched(), so that we can bypass elevator
callbacks for both flush and passthrough requests.
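
For quick reference, the helper added by the diff at the end of this mail is
just a predicate on the command flags:

static inline bool blk_mq_bypass_sched(blk_opf_t cmd_flags)
{
	/* flush and passthrough requests bypass the I/O scheduler */
	return op_is_flush(cmd_flags) || blk_op_is_passthrough(cmd_flags);
}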

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq-sched.h | 9 +++++++--
 block/blk-mq.c       | 5 ++---
 2 files changed, 9 insertions(+), 5 deletions(-)

Comments

Bart Van Assche May 15, 2023, 3:52 p.m. UTC | #1
On 5/15/23 07:46, Ming Lei wrote:
> @@ -48,7 +53,7 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
>   
>   static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
>   {
> -	if (rq->rq_flags & RQF_ELV) {
> +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
>   		struct elevator_queue *e = rq->q->elevator;
>   
>   		if (e->type->ops.completed_request)
> @@ -58,7 +63,7 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
>   
>   static inline void blk_mq_sched_requeue_request(struct request *rq)
>   {
> -	if (rq->rq_flags & RQF_ELV) {
> +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
>   		struct request_queue *q = rq->q;
>   		struct elevator_queue *e = q->elevator;

Has it been considered not to set RQF_ELV for passthrough requests 
instead of making the above changes?

Thanks,

Bart.
Keith Busch May 15, 2023, 8:22 p.m. UTC | #2
On Mon, May 15, 2023 at 08:52:38AM -0700, Bart Van Assche wrote:
> On 5/15/23 07:46, Ming Lei wrote:
> > @@ -48,7 +53,7 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
> >   static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
> >   {
> > -	if (rq->rq_flags & RQF_ELV) {
> > +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
> >   		struct elevator_queue *e = rq->q->elevator;
> >   		if (e->type->ops.completed_request)
> > @@ -58,7 +63,7 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
> >   static inline void blk_mq_sched_requeue_request(struct request *rq)
> >   {
> > -	if (rq->rq_flags & RQF_ELV) {
> > +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
> >   		struct request_queue *q = rq->q;
> >   		struct elevator_queue *e = q->elevator;
> 
> Has it been considered not to set RQF_ELV for passthrough requests instead
> of making the above changes?

That sounds like a good idea. It changes more behavior than what Ming is
targeting here, but after looking through each use for RQF_ELV, I think
not having that set really is the right thing to do in all cases for
passthrough requests.
Ming Lei May 16, 2023, 1:20 a.m. UTC | #3
On Mon, May 15, 2023 at 02:22:18PM -0600, Keith Busch wrote:
> On Mon, May 15, 2023 at 08:52:38AM -0700, Bart Van Assche wrote:
> > On 5/15/23 07:46, Ming Lei wrote:
> > > @@ -48,7 +53,7 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
> > >   static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
> > >   {
> > > -	if (rq->rq_flags & RQF_ELV) {
> > > +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
> > >   		struct elevator_queue *e = rq->q->elevator;
> > >   		if (e->type->ops.completed_request)
> > > @@ -58,7 +63,7 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
> > >   static inline void blk_mq_sched_requeue_request(struct request *rq)
> > >   {
> > > -	if (rq->rq_flags & RQF_ELV) {
> > > +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
> > >   		struct request_queue *q = rq->q;
> > >   		struct elevator_queue *e = q->elevator;
> > 
> > Has it been considered not to set RQF_ELV for passthrough requests instead
> > of making the above changes?
> 
> That sounds like a good idea. It changes more behavior than what Ming is
> targeting here, but after looking through each use for RQF_ELV, I think
> not having that set really is the right thing to do in all cases for
> passthrough requests.

I did consider that approach. But:

- RQF_ELV actually means that the request & its tag are allocated from sched tags.

- if RQF_ELV is cleared for passthrough requests, requests may be
  allocated from sched tags (normal IO) and driver tags (passthrough) at the same time.
  This may cause other problems, such as breaking blk_mq_hctx_has_requests().
  It also makes it unlikely that we can optimize tag resource utilization in
  future; at least for a single LUN/NS, there would be no need to keep sched
  tags & driver tags in memory at the same time.
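
For context, a simplified sketch of the current blk_mq_hctx_has_requests()
(paraphrased from block/blk-mq.c, details may differ by kernel version) shows
why a passthrough request holding only a driver tag would be missed once
q->elevator is set:

static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
{
	/* with q->elevator set, only the sched tags are walked */
	struct blk_mq_tags *tags = hctx->sched_tags ?
			hctx->sched_tags : hctx->tags;
	struct rq_iter_data data = { .hctx = hctx, };

	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
	return data.has_rq;
}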


Thanks,
Ming
Christoph Hellwig May 16, 2023, 6:24 a.m. UTC | #4
On Tue, May 16, 2023 at 09:20:55AM +0800, Ming Lei wrote:
> > That sounds like a good idea. It changes more behavior than what Ming is
> > targeting here, but after looking through each use for RQF_ELV, I think
> > not having that set really is the right thing to do in all cases for
> > passthrough requests.
> 
> I did consider that approach. But:
> 
> - RQF_ELV actually means that the request & its tag are allocated from sched tags.
> 
> - if RQF_ELV is cleared for passthrough requests, requests may be
>   allocated from sched tags (normal IO) and driver tags (passthrough) at the same time.
>   This may cause other problems, such as breaking blk_mq_hctx_has_requests().
>   It also makes it unlikely that we can optimize tag resource utilization in
>   future; at least for a single LUN/NS, there would be no need to keep sched
>   tags & driver tags in memory at the same time.

Then make that obvious.  That is:

 - rename RQF_ELV to RQF_SCHED_TAGS
 - add the RQF_SCHED_TAGS check to your blk_mq_bypass_sched helper.
   I'd also invert the return value and rename it to something like
   blk_rq_use_sched.
Ming Lei May 16, 2023, 8:39 a.m. UTC | #5
On Tue, May 16, 2023 at 08:24:09AM +0200, Christoph Hellwig wrote:
> On Tue, May 16, 2023 at 09:20:55AM +0800, Ming Lei wrote:
> > > That sounds like a good idea. It changes more behavior than what Ming is
> > > targeting here, but after looking through each use for RQF_ELV, I think
> > > not having that set really is the right thing to do in all cases for
> > > passthrough requests.
> > 
> > I did consider that approach. But:
> > 
> > - RQF_ELV actually means that the request & its tag are allocated from sched tags.
> > 
> > - if RQF_ELV is cleared for passthrough requests, requests may be
> >   allocated from sched tags (normal IO) and driver tags (passthrough) at the same time.
> >   This may cause other problems, such as breaking blk_mq_hctx_has_requests().
> >   It also makes it unlikely that we can optimize tag resource utilization in
> >   future; at least for a single LUN/NS, there would be no need to keep sched
> >   tags & driver tags in memory at the same time.
> 
> Then make that obvious.  That is:
> 
>  - rename RQF_ELV to RQF_SCHED_TAGS
>  - add the RQF_SCHED_TAGS check to your blk_mq_bypass_sched helper.
>    I'd also invert the return value and rename it to something like
>    blk_rq_use_sched.

I can understand the point, but it may not be done by a single flag, so
how about the following change?

diff --git a/block/blk-mq.c b/block/blk-mq.c
index d1b4aae43cf9..eddc2b5f3319 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -354,7 +354,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 		data->rq_flags |= RQF_IO_STAT;
 	rq->rq_flags = data->rq_flags;
 
-	if (!(data->rq_flags & RQF_ELV)) {
+	if (!(data->rq_flags & RQF_SCHED_TAGS)) {
 		rq->tag = tag;
 		rq->internal_tag = BLK_MQ_NO_TAG;
 	} else {
@@ -392,8 +392,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 		INIT_HLIST_NODE(&rq->hash);
 		RB_CLEAR_NODE(&rq->rb_node);
 
-		if (!op_is_flush(data->cmd_flags) &&
-		    e->type->ops.prepare_request) {
+		if (e->type->ops.prepare_request) {
 			e->type->ops.prepare_request(rq);
 			rq->rq_flags |= RQF_ELVPRIV;
 		}
@@ -451,15 +450,19 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	if (q->elevator) {
 		struct elevator_queue *e = q->elevator;
 
-		data->rq_flags |= RQF_ELV;
+		data->rq_flags |= RQF_SCHED_TAGS;
+
+		/* both flush and passthrough request can't go into scheduler */
+		if (!op_is_flush(data->cmd_flags) &&
+		    !blk_op_is_passthrough(data->cmd_flags))
+			data->rq_flags |= RQF_ELV;
 
 		/*
 		 * Flush/passthrough requests are special and go directly to the
 		 * dispatch list. Don't include reserved tags in the
 		 * limiting, as it isn't useful.
 		 */
-		if (!op_is_flush(data->cmd_flags) &&
-		    !blk_op_is_passthrough(data->cmd_flags) &&
+		if ((data->rq_flags & RQF_ELV) &&
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
 			e->type->ops.limit_depth(data->cmd_flags, data);
@@ -468,7 +471,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 retry:
 	data->ctx = blk_mq_get_ctx(q);
 	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
-	if (!(data->rq_flags & RQF_ELV))
+	if (!(data->rq_flags & RQF_SCHED_TAGS))
 		blk_mq_tag_busy(data->hctx);
 
 	if (data->flags & BLK_MQ_REQ_RESERVED)
@@ -651,7 +654,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	if (!q->elevator)
 		blk_mq_tag_busy(data.hctx);
 	else
-		data.rq_flags |= RQF_ELV;
+		data.rq_flags |= RQF_SCHED_TAGS;
 
 	if (flags & BLK_MQ_REQ_RESERVED)
 		data.rq_flags |= RQF_RESV;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 06caacd77ed6..b4910a6471b7 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -61,7 +61,8 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
 /* queue has elevator attached */
 #define RQF_ELV			((__force req_flags_t)(1 << 22))
-#define RQF_RESV			((__force req_flags_t)(1 << 23))
+#define RQF_RESV		((__force req_flags_t)(1 << 23))
+#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << 24))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \



Thanks,
Ming
Keith Busch May 16, 2023, 2:47 p.m. UTC | #6
On Tue, May 16, 2023 at 09:20:55AM +0800, Ming Lei wrote:
> On Mon, May 15, 2023 at 02:22:18PM -0600, Keith Busch wrote:
> > On Mon, May 15, 2023 at 08:52:38AM -0700, Bart Van Assche wrote:
> > > On 5/15/23 07:46, Ming Lei wrote:
> > > > @@ -48,7 +53,7 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
> > > >   static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
> > > >   {
> > > > -	if (rq->rq_flags & RQF_ELV) {
> > > > +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
> > > >   		struct elevator_queue *e = rq->q->elevator;
> > > >   		if (e->type->ops.completed_request)
> > > > @@ -58,7 +63,7 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
> > > >   static inline void blk_mq_sched_requeue_request(struct request *rq)
> > > >   {
> > > > -	if (rq->rq_flags & RQF_ELV) {
> > > > +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
> > > >   		struct request_queue *q = rq->q;
> > > >   		struct elevator_queue *e = q->elevator;
> > > 
> > > Has it been considered not to set RQF_ELV for passthrough requests instead
> > > of making the above changes?
> > 
> > That sounds like a good idea. It changes more behavior than what Ming is
> > targeting here, but after looking through each use for RQF_ELV, I think
> > not having that set really is the right thing to do in all cases for
> > passthrough requests.
> 
> I did consider that approach. But:
> 
> - RQF_ELV actually means that the request & its tag are allocated from sched tags.
> 
> - if RQF_ELV is cleared for passthrough requests, requests may be
>   allocated from sched tags (normal IO) and driver tags (passthrough) at the same time.
>   This may cause other problems, such as breaking blk_mq_hctx_has_requests().
>   It also makes it unlikely that we can optimize tag resource utilization in
>   future; at least for a single LUN/NS, there would be no need to keep sched
>   tags & driver tags in memory at the same time.

Isn't that similar to multiple namespaces where some use an elevator and
others use 'none'? They're all contending for the same shared driver
tags with racing 'has_requests()'.

And the passthrough case is special with users of that interface taking
on a greater responsibility and generally want the kernel out of the
way. I don't think anyone would purposefully run a tag intense workload
through that engine at the same time as using a generic one with the
scheduler. It definitely should still work, but it doesn't need to be
fair, right?
Ming Lei May 17, 2023, 3:26 a.m. UTC | #7
On Tue, May 16, 2023 at 08:47:46AM -0600, Keith Busch wrote:
> On Tue, May 16, 2023 at 09:20:55AM +0800, Ming Lei wrote:
> > On Mon, May 15, 2023 at 02:22:18PM -0600, Keith Busch wrote:
> > > On Mon, May 15, 2023 at 08:52:38AM -0700, Bart Van Assche wrote:
> > > > On 5/15/23 07:46, Ming Lei wrote:
> > > > > @@ -48,7 +53,7 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
> > > > >   static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
> > > > >   {
> > > > > -	if (rq->rq_flags & RQF_ELV) {
> > > > > +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
> > > > >   		struct elevator_queue *e = rq->q->elevator;
> > > > >   		if (e->type->ops.completed_request)
> > > > > @@ -58,7 +63,7 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
> > > > >   static inline void blk_mq_sched_requeue_request(struct request *rq)
> > > > >   {
> > > > > -	if (rq->rq_flags & RQF_ELV) {
> > > > > +	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
> > > > >   		struct request_queue *q = rq->q;
> > > > >   		struct elevator_queue *e = q->elevator;
> > > > 
> > > > Has it been considered not to set RQF_ELV for passthrough requests instead
> > > > of making the above changes?
> > > 
> > > That sounds like a good idea. It changes more behavior than what Ming is
> > > targeting here, but after looking through each use for RQF_ELV, I think
> > > not having that set really is the right thing to do in all cases for
> > > passthrough requests.
> > 
> > I did consider that approach. But:
> > 
> > - RQF_ELV actually means that the request & its tag are allocated from sched tags.
> > 
> > - if RQF_ELV is cleared for passthrough requests, requests may be
> >   allocated from sched tags (normal IO) and driver tags (passthrough) at the same time.
> >   This may cause other problems, such as breaking blk_mq_hctx_has_requests().
> >   It also makes it unlikely that we can optimize tag resource utilization in
> >   future; at least for a single LUN/NS, there would be no need to keep sched
> >   tags & driver tags in memory at the same time.
> 
> Isn't that similar to multiple namespaces where some use an elevator and
> others use 'none'? They're all contending for the same shared driver
> tags with racing 'has_requests()'.

It is similar, but not the same.

So far, for a single request queue, we have never supported allocating
requests/tags from both sched tags and driver tags at the same time.

If we simply clear RQF_ELV for pt requests, then at least:

1) blk_mq_hctx_has_requests() is broken since this function only checks
sched tags in case of q->elevator

2) q->nr_requests may not be respected any more in case of q->elevator
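
For reference on 2), a trimmed sketch of blk_mq_update_nr_requests()
(paraphrased from block/blk-mq.c, details may differ): with a scheduler
attached, q->nr_requests only resizes the sched tags, so requests taken
straight from driver tags would escape that limit:

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * With an I/O scheduler, only the sched tag depth is
		 * updated; the driver tags keep the hardware queue depth.
		 */
		if (hctx->sched_tags)
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		else
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
						      nr, false);
		if (ret)
			break;
	}
	if (!ret)
		q->nr_requests = nr;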

> And the passthrough case is special with users of that interface taking
> on a greater responsibility and generally want the kernel out of the
> way. I don't think anyone would purposefully run a tag intense workload
> through that engine at the same time as using a generic one with the
> scheduler. It definitely should still work, but it doesn't need to be
> fair, right?

I guess it may work, but the question is what we can get from this kind
of big change. And I think this approach could be follow-up work if it is
proved useful.


Thanks,
Ming
Christoph Hellwig May 17, 2023, 7:22 a.m. UTC | #8
On Tue, May 16, 2023 at 04:39:05PM +0800, Ming Lei wrote:
> I can understand the point, but it may not be done by a single flag,

Can you explain why?  Note that we also already have RQF_ELVPRIV for
any request that has elevator private data.  I don't really think we
need a third flag.
Ming Lei May 17, 2023, 8:58 a.m. UTC | #9
On Wed, May 17, 2023 at 09:22:18AM +0200, Christoph Hellwig wrote:
> On Tue, May 16, 2023 at 04:39:05PM +0800, Ming Lei wrote:
> > I can understand the point, but it may not be done by a single flag,
> 
> Can you explain why?  Note that we also already have RQF_ELVPRIV for
> any request that has elevator private data.  I don't really think we
> need a third flag.

RQF_ELVPRIV isn't the same as RQF_ELV; the relationship between the two is:

	RQF_ELVPRIV == (RQF_ELV && non_flush_pt_req && e->type->ops.prepare_request)

RQF_ELVPRIV can be replaced with the above expression to save one flag.

RQF_ELV isn't the same as RQF_SCHED_TAGS either: RQF_ELV should be used for
checking if an elevator callback is needed, while RQF_SCHED_TAGS is for
allocating the req/tag and dealing with tag busy accounting.

In case of q->elevator, RQF_SCHED_TAGS is always set, but:

- for pt/flush requests, RQF_ELV is cleared.
- for other requests, RQF_ELV is set.

Then we can avoid any elevator callback for pt/flush requests.
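
With that split, the inverted helper Christoph suggested would collapse to a
plain flag test; a hypothetical sketch (the name follows his earlier
suggestion and is not part of this patch):

static inline bool blk_rq_use_sched(struct request *rq)
{
	/* only non-flush, non-pt requests on an elevator queue carry RQF_ELV */
	return rq->rq_flags & RQF_ELV;
}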


thanks, 
Ming
Keith Busch May 17, 2023, 6:13 p.m. UTC | #10
On Wed, May 17, 2023 at 11:26:32AM +0800, Ming Lei wrote:
> On Tue, May 16, 2023 at 08:47:46AM -0600, Keith Busch wrote:
> 
> > And the passthrough case is special with users of that interface taking
> > on a greater responsibility and generally want the kernel out of the
> > way. I don't think anyone would purposefully run a tag intense workload
> > through that engine at the same time as using a generic one with the
> > scheduler. It definitely should still work, but it doesn't need to be
> > fair, right?
> 
> I guess it may work, but the question is what we can get from this kind
> of big change. And I think this approach could be follow-up work if it is
> proved useful.

I'm just trying to remove any need for side channels to bypass block
layer functionality, like this one:

  http://lists.infradead.org/pipermail/linux-nvme/2023-April/039522.html
Ming Lei May 18, 2023, 1:22 a.m. UTC | #11
On Wed, May 17, 2023 at 12:13:04PM -0600, Keith Busch wrote:
> On Wed, May 17, 2023 at 11:26:32AM +0800, Ming Lei wrote:
> > On Tue, May 16, 2023 at 08:47:46AM -0600, Keith Busch wrote:
> > 
> > > And the passthrough case is special with users of that interface taking
> > > on a greater responsibility and generally want the kernel out of the
> > > way. I don't think anyone would purposefully run a tag intense workload
> > > through that engine at the same time as using a generic one with the
> > > scheduler. It definitely should still work, but it doesn't need to be
> > > fair, right?
> > 
> > I guess it may work, but the question is what we can get from this kind
> > of big change. And I think this approach could be follow-up work if it is
> > proved useful.
> 
> I'm just trying to remove any need for side channels to bypass block
> layer functionality, like this one:
> 
>   http://lists.infradead.org/pipermail/linux-nvme/2023-April/039522.html
> 

In "io_uring attached nvme queue" patchset, Kanchan tried to bypass
request/bio completely, and same with blk-mq's pt code path.

You mean you'd suggest to still reuse req/bio & blk-mq pt code path
for "io_uring attached nvme queue"?

Cc Kanchan.

Thanks,
Ming

Patch

diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 7c3cbad17f30..4aa879749843 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -22,6 +22,11 @@  int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 void blk_mq_sched_free_rqs(struct request_queue *q);
 
+static inline bool blk_mq_bypass_sched(blk_opf_t cmd_flags)
+{
+	return op_is_flush(cmd_flags) || blk_op_is_passthrough(cmd_flags);
+}
+
 static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
 {
 	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
@@ -48,7 +53,7 @@  blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 
 static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
 {
-	if (rq->rq_flags & RQF_ELV) {
+	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
 		struct elevator_queue *e = rq->q->elevator;
 
 		if (e->type->ops.completed_request)
@@ -58,7 +63,7 @@  static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
 
 static inline void blk_mq_sched_requeue_request(struct request *rq)
 {
-	if (rq->rq_flags & RQF_ELV) {
+	if ((rq->rq_flags & RQF_ELV) && !blk_mq_bypass_sched(rq->cmd_flags)) {
 		struct request_queue *q = rq->q;
 		struct elevator_queue *e = q->elevator;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b4aaf42f1125..bd80fe3aa0c3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -392,7 +392,7 @@  static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 		INIT_HLIST_NODE(&rq->hash);
 		RB_CLEAR_NODE(&rq->rb_node);
 
-		if (!op_is_flush(data->cmd_flags) &&
+		if (!blk_mq_bypass_sched(data->cmd_flags) &&
 		    e->type->ops.prepare_request) {
 			e->type->ops.prepare_request(rq);
 			rq->rq_flags |= RQF_ELVPRIV;
@@ -458,8 +458,7 @@  static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 		 * dispatch list. Don't include reserved tags in the
 		 * limiting, as it isn't useful.
 		 */
-		if (!op_is_flush(data->cmd_flags) &&
-		    !blk_op_is_passthrough(data->cmd_flags) &&
+		if (!blk_mq_bypass_sched(data->cmd_flags) &&
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
 			e->type->ops.limit_depth(data->cmd_flags, data);