@@ -401,14 +401,17 @@ EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
* reserved) where rq is a pointer to a request and hctx points
* to the hardware queue associated with the request. 'reserved'
* indicates whether or not @rq is a reserved request.
- * @priv: Will be passed as third argument to @fn.
+ * @check_break: Pointer to a function that will be called for each hctx on @q.
+ * Iteration over the remaining hctxs stops as soon as @check_break
+ * returns false; pass NULL to iterate over all hctxs unconditionally.
+ * @priv: Will be passed as third argument to @fn and as the argument to @check_break.
*
* Note: if @q->tag_set is shared with other request queues then @fn will be
* called for all requests on all queues that share that tag set and not only
* for requests associated with @q.
*/
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
- void *priv)
+ check_break_fn *check_break, void *priv)
{
struct blk_mq_hw_ctx *hctx;
int i;
@@ -434,7 +437,10 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
if (tags->nr_reserved_tags)
bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
+
+ if (check_break && !check_break(priv))
+ break;
}
blk_queue_exit(q);
}
@@ -42,7 +42,7 @@ extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set,
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
- void *priv);
+ check_break_fn *check_break, void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
void *priv);
@@ -115,7 +115,7 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
{
struct mq_inflight mi = { .part = part };
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, NULL, &mi);
return mi.inflight[0] + mi.inflight[1];
}
@@ -125,11 +125,22 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
{
struct mq_inflight mi = { .part = part };
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, NULL, &mi);
inflight[0] = mi.inflight[0];
inflight[1] = mi.inflight[1];
}
+static bool blk_mq_part_check_break(void *priv)
+{
+ struct mq_inflight *mi = priv;
+
+ /* return false to stop iterating over the remaining hctxs */
+ if (mi->inflight[0] || mi->inflight[1])
+ return false;
+
+ return true;
+}
+
static bool blk_mq_part_check_inflight(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv,
bool reserved)
@@ -151,7 +162,8 @@ bool blk_mq_part_is_in_flight(struct request_queue *q, struct hd_struct *part)
mi.inflight[0] = mi.inflight[1] = 0;
- blk_mq_queue_tag_busy_iter(q, blk_mq_part_check_inflight, &mi);
+ blk_mq_queue_tag_busy_iter(q, blk_mq_part_check_inflight,
+ blk_mq_part_check_break, &mi);
return mi.inflight[0] + mi.inflight[1] > 0;
}
@@ -909,11 +921,23 @@ static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
return true;
}
+static bool blk_mq_rq_check_break(void *priv)
+{
+ bool *busy = priv;
+
+ /* return false to stop iterating over the remaining hctxs */
+ if (*busy)
+ return false;
+
+ return true;
+}
+
bool blk_mq_queue_inflight(struct request_queue *q)
{
bool busy = false;
- blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
+ blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight,
+ blk_mq_rq_check_break, &busy);
return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
@@ -1018,7 +1042,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
if (!percpu_ref_tryget(&q->q_usage_counter))
return;
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, NULL, &next);
if (next != 0) {
mod_timer(&q->timeout, next);
@@ -280,6 +280,7 @@ struct blk_mq_queue_data {
typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef bool (check_break_fn)(void *);
/**
* struct blk_mq_ops - Callback functions that implements block driver
blk_mq_part_is_in_flight() and blk_mq_queue_inflight() do not care how
many I/Os are in flight; they can stop iterating over the remaining
hctxs as soon as they find a request that meets their requirement,
which saves some CPU cycles.

Signed-off-by: Weiping Zhang <zhangweiping@didiglobal.com>
---
 block/blk-mq-tag.c     | 10 ++++++++--
 block/blk-mq-tag.h     |  2 +-
 block/blk-mq.c         | 34 +++++++++++++++++++++++++++++-----
 include/linux/blk-mq.h |  1 +
 4 files changed, 39 insertions(+), 8 deletions(-)
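For readers who want to see the control flow in isolation, below is a
minimal userspace sketch of the early-break iteration pattern this patch
introduces. All names in it (queue, hw_queue, queue_busy_iter, ...) are
hypothetical stand-ins, not the real blk-mq structures or API; it only
models how a check_break callback short-circuits the per-hctx loop.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct request_queue / struct blk_mq_hw_ctx. */
struct hw_queue { int inflight; };
struct queue { struct hw_queue hctxs[4]; int nr_hw_queues; };

typedef void (iter_fn)(struct hw_queue *hctx, void *priv);
typedef bool (check_break_fn)(void *priv);

/*
 * Model of blk_mq_queue_tag_busy_iter() after this patch: apply @fn to
 * each hardware queue, and stop early when @check_break returns false.
 * A NULL @check_break keeps the old "visit everything" behavior.
 */
static void queue_busy_iter(struct queue *q, iter_fn *fn,
			    check_break_fn *check_break, void *priv)
{
	for (int i = 0; i < q->nr_hw_queues; i++) {
		fn(&q->hctxs[i], priv);
		/* Stop visiting the remaining hw queues on request. */
		if (check_break && !check_break(priv))
			break;
	}
}

static void count_inflight(struct hw_queue *hctx, void *priv)
{
	*(int *)priv += hctx->inflight;
}

/* Mirrors blk_mq_rq_check_break(): break once anything is in flight. */
static bool stop_when_busy(void *priv)
{
	return *(int *)priv == 0;
}

int main(void)
{
	struct queue q = { .hctxs = { {0}, {2}, {0}, {5} }, .nr_hw_queues = 4 };
	int found = 0;

	/* Early break: hctxs[2] and hctxs[3] are never visited. */
	queue_busy_iter(&q, count_inflight, stop_when_busy, &found);
	printf("in flight: %s\n", found ? "yes" : "no");
	return 0;
}

The split mirrors the patch's design choice: callers that need exact
counts (blk_mq_in_flight()) pass NULL and visit every hctx, while yes/no
callers (blk_mq_queue_inflight(), blk_mq_part_is_in_flight()) supply a
callback and bail out at the first hit.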