@@ -214,16 +214,7 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
-/**
- * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
- * @q: request queue.
- *
- * Note: this function does not prevent that the struct request end_io()
- * callback function is invoked. Once this function is returned, we make
- * sure no dispatch can happen until the queue is unquiesced via
- * blk_mq_unquiesce_queue().
- */
-void blk_mq_quiesce_queue(struct request_queue *q)
+static void __blk_mq_quiesce_queue(struct request_queue *q, bool wait)
{
bool blocking = !!(q->tag_set->flags & BLK_MQ_F_BLOCKING);
	bool was_quiesced = __blk_mq_quiesce_queue_nowait(q);
@@ -231,6 +222,9 @@ void blk_mq_quiesce_queue(struct request_queue *q)
if (!was_quiesced && blocking)
percpu_ref_kill(&q->dispatch_counter);
+ if (!wait)
+ return;
+
/*
* In case of F_BLOCKING, if driver unquiesces its queue being
* quiesced, it can cause bigger trouble, and we simply return &
@@ -244,6 +238,20 @@ void blk_mq_quiesce_queue(struct request_queue *q)
else
synchronize_rcu();
}
+
+/**
+ * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
+ * @q: request queue.
+ *
+ * Note: this function does not prevent the struct request end_io()
+ * callback from being invoked. Once this function returns, no dispatch
+ * can happen until the queue is unquiesced via
+ * blk_mq_unquiesce_queue().
+ */
+void blk_mq_quiesce_queue(struct request_queue *q)
+{
+ __blk_mq_quiesce_queue(q, true);
+}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
/*
@@ -265,6 +273,37 @@ void blk_mq_unquiesce_queue(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
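+/*
+ * Quiesce all request queues that share this tag set. Dispatch is first
+ * stopped on every queue, so the waiting part (one synchronize_rcu(), or
+ * one wait per queue for BLK_MQ_F_BLOCKING) is not serialized per queue.
+ */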
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
+{
+ struct request_queue *q;
+
+ mutex_lock(&set->tag_list_lock);
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ __blk_mq_quiesce_queue(q, false);
+
+	/* wait until quiesce is done on all queues */
+ if (set->flags & BLK_MQ_F_BLOCKING) {
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ wait_event(q->mq_quiesce_wq,
+ percpu_ref_is_zero(&q->dispatch_counter));
+ } else {
+ synchronize_rcu();
+ }
+ mutex_unlock(&set->tag_list_lock);
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
+
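+/*
+ * Unquiesce all request queues that share this tag set, allowing
+ * dispatch to resume on each of them.
+ */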
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
+{
+ struct request_queue *q;
+
+ mutex_lock(&set->tag_list_lock);
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ blk_mq_unquiesce_queue(q);
+ mutex_unlock(&set->tag_list_lock);
+}
+EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
+
void blk_mq_wake_waiters(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
@@ -519,6 +519,8 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
unsigned int blk_mq_rq_cpu(struct request *rq);
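
Usage note: with these helpers a driver whose queues share one tag set can
quiesce them all at once, paying for a single synchronize_rcu() (or for
overlapping per-queue waits with BLK_MQ_F_BLOCKING) instead of one full
quiesce per queue. A minimal sketch of a controller reset path follows;
demo_ctrl and demo_reset_ctrl() are illustrative names, not part of this
patch:

	#include <linux/blk-mq.h>

	struct demo_ctrl {
		struct blk_mq_tag_set tag_set;	/* shared by all queues */
	};

	/* Hypothetical reset path: stop all I/O, reset hardware, resume. */
	static void demo_reset_ctrl(struct demo_ctrl *ctrl)
	{
		/*
		 * Stop dispatch on every queue in the set; the waiting
		 * part happens once for the whole set rather than once
		 * per queue.
		 */
		blk_mq_quiesce_tagset(&ctrl->tag_set);

		/* ... hardware teardown and re-initialization here ... */

		/* Resume dispatch on all queues in the set. */
		blk_mq_unquiesce_tagset(&ctrl->tag_set);
	}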