@@ -235,6 +235,34 @@ void blk_mq_quiesce_queue(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
+/*
+ * blk_mq_quiesce_queue_async - quiesce a queue without waiting for ongoing
+ * dispatches to finish; used to quiesce many queues together to reduce the
+ * total wait time. Must be paired with blk_mq_quiesce_queue_async_wait.
+ * Caution: concurrent use is not supported.
+ */
+void blk_mq_quiesce_queue_async(struct request_queue *q, atomic_t *count)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ blk_mq_quiesce_queue_nowait(q);
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->flags & BLK_MQ_F_BLOCKING)
+ synchronize_srcu_async(hctx->srcu, count);
+ }
+
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_async);
+
+void blk_mq_quiesce_queue_async_wait(atomic_t *count)
+{
+ synchronize_rcu();
+ while (atomic_read(count))
+ schedule();
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_async_wait);
+
/*
* blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
* @q: request queue.
@@ -515,6 +515,8 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
+void blk_mq_quiesce_queue_async(struct request_queue *q, atomic_t *count);
+void blk_mq_quiesce_queue_async_wait(atomic_t *count);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
In some scenarios we need to quiesce many queues together; quiescing them
one by one can take a long time. Introduce an asynchronous mechanism for
quiescing a queue: quiesce the queues together without waiting, then wait
once until the ongoing dispatches of all queues have completed. This
reduces the serial wait time.

Signed-off-by: Chao Leng <lengchao@huawei.com>
---
 block/blk-mq.c         | 28 ++++++++++++++++++++++++++++
 include/linux/blk-mq.h |  2 ++
 2 files changed, 30 insertions(+)
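
For reviewers, a rough usage sketch of the new pair of exports (not part of
the patch): a driver that owns many request queues starts the quiesce on all
of them and then issues one combined wait. The driver-side names (my_ctrl,
my_ns, the namespaces list) are hypothetical placeholders, and the sketch
assumes synchronize_srcu_async() raises *count when the grace period is
started and drops it again on completion, as implied by the wait loop above.

/* Hypothetical caller, for illustration only. */
static void my_quiesce_all_queues(struct my_ctrl *ctrl)
{
	struct my_ns *ns;
	atomic_t count = ATOMIC_INIT(0);

	/* Start quiescing every queue; no per-queue grace-period wait here. */
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue_async(ns->queue, &count);

	/*
	 * One combined wait: synchronize_rcu() covers the non-BLK_MQ_F_BLOCKING
	 * hctxs, and the counter covers the SRCU grace periods started above.
	 */
	blk_mq_quiesce_queue_async_wait(&count);
}

Compared with calling blk_mq_quiesce_queue() inside the loop, the grace
periods of all queues elapse in parallel, so the total wait is roughly one
grace period instead of one per queue.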