@@ -63,6 +63,12 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
return bucket;
}
+static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
+ blk_qc_t qc)
+{
+ return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
+}
+
/*
* Check if any of the ctx, dispatch list or elevator
* have pending work in this hardware queue.
@@ -3855,7 +3861,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t qc, bool spin)
{
- struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(qc)];
+ struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
long state = current->state;
hctx->poll_considered++;
@@ -3909,7 +3915,7 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
if (current->plug)
blk_flush_plug_list(current->plug, false);
- hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+ hctx = blk_qc_to_hctx(q, cookie);
/*
* If we sleep, have the caller restart the poll loop to reset
@@ -515,11 +515,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie)
return cookie != BLK_QC_T_NONE;
}
-static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
-{
- return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
-}
-
static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
Add a helper to get the hctx from a request_queue and cookie, and fold
the blk_qc_t_to_queue_num helper into it as no other callers are left.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c            | 10 ++++++++--
 include/linux/blk_types.h |  5 -----
 2 files changed, 8 insertions(+), 7 deletions(-)