diff mbox series

[2/5] blk-mq: Export iterating queue requests

Message ID 20190308174006.5032-2-keith.busch@intel.com (mailing list archive)
State New, archived
Headers show
Series [1/5] blk-mq: Export reading mq request state | expand

Commit Message

Keith Busch March 8, 2019, 5:40 p.m. UTC
A driver may need to iterate a particular queue's tagged requests rather
than the whole tagset.

Signed-off-by: Keith Busch <keith.busch@intel.com>
---
 block/blk-mq-tag.c     | 1 +
 block/blk-mq-tag.h     | 2 --
 include/linux/blk-mq.h | 2 ++
 3 files changed, 3 insertions(+), 2 deletions(-)

Comments

Bart Van Assche March 8, 2019, 6:08 p.m. UTC | #1
On Fri, 2019-03-08 at 10:40 -0700, Keith Busch wrote:
> A driver may need to iterate a particular queue's tagged request rather
> than the whole tagset.

Since iterating over requests triggers race conditions with request execution
please explain what use case(s) you have in mind and what your plan is to handle
such race conditions.

Thanks,

Bart.
Keith Busch March 8, 2019, 6:13 p.m. UTC | #2
On Fri, Mar 08, 2019 at 10:08:47AM -0800, Bart Van Assche wrote:
> On Fri, 2019-03-08 at 10:40 -0700, Keith Busch wrote:
> > A driver may need to iterate a particular queue's tagged request rather
> > than the whole tagset.
> 
> Since iterating over requests triggers race conditions with request execution
> please explain what use case(s) you have in mind and what your plan is to handle
> such race conditions.

That race isn't new. You should only iterate when your queues are quiesced
to ensure the request sent to a callback is stable.
diff mbox series

Patch

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index a4931fc7be8a..a4ba91b332b0 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -407,6 +407,7 @@  void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 	}
 	blk_queue_exit(q);
 }
+EXPORT_SYMBOL(blk_mq_queue_tag_busy_iter);
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
 		    bool round_robin, int node)
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 61deab0b5a5a..5af7ff94b400 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -33,8 +33,6 @@  extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_tags **tags,
 					unsigned int depth, bool can_grow);
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
-void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
-		void *priv);
 
 static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
 						 struct blk_mq_hw_ctx *hctx)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b0c814bcc7e3..a64b3fdce0b0 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -321,6 +321,8 @@  bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv);
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+		void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_freeze_queue_start(struct request_queue *q);