--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -370,23 +370,23 @@ static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
{
struct blk_mq_hw_ctx *hctx = m->private;

- spin_lock(&hctx->lock);
- return seq_list_start(&hctx->dispatch, *pos);
+ spin_lock(hctx->dispatch_lock);
+ return seq_list_start(hctx->dispatch_list, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
struct blk_mq_hw_ctx *hctx = m->private;

- return seq_list_next(v, &hctx->dispatch, pos);
+ return seq_list_next(v, hctx->dispatch_list, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
- __releases(&hctx->lock)
+ __releases(hctx->dispatch_lock)
{
struct blk_mq_hw_ctx *hctx = m->private;

- spin_unlock(&hctx->lock);
+ spin_unlock(hctx->dispatch_lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1925,8 +1925,11 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
static void blk_mq_init_dispatch(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
{
- spin_lock_init(&hctx->lock);
- INIT_LIST_HEAD(&hctx->dispatch);
+ hctx->dispatch_lock = &hctx->lock;
+ hctx->dispatch_list = &hctx->dispatch;
+
+ spin_lock_init(hctx->dispatch_lock);
+ INIT_LIST_HEAD(hctx->dispatch_list);
}

static int blk_mq_init_hctx(struct request_queue *q,
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -152,38 +152,38 @@ static inline void blk_mq_hctx_clear_busy(struct blk_mq_hw_ctx *hctx)
static inline bool blk_mq_has_dispatch_rqs(struct blk_mq_hw_ctx *hctx)
{
- return !list_empty_careful(&hctx->dispatch);
+ return !list_empty_careful(hctx->dispatch_list);
}

static inline void blk_mq_add_rq_to_dispatch(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
- spin_lock(&hctx->lock);
- list_add(&rq->queuelist, &hctx->dispatch);
- spin_unlock(&hctx->lock);
+ spin_lock(hctx->dispatch_lock);
+ list_add(&rq->queuelist, hctx->dispatch_list);
+ spin_unlock(hctx->dispatch_lock);
}

static inline void blk_mq_add_list_to_dispatch(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
- spin_lock(&hctx->lock);
- list_splice_init(list, &hctx->dispatch);
- spin_unlock(&hctx->lock);
+ spin_lock(hctx->dispatch_lock);
+ list_splice_init(list, hctx->dispatch_list);
+ spin_unlock(hctx->dispatch_lock);
}

static inline void blk_mq_add_list_to_dispatch_tail(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
- spin_lock(&hctx->lock);
- list_splice_tail_init(list, &hctx->dispatch);
- spin_unlock(&hctx->lock);
+ spin_lock(hctx->dispatch_lock);
+ list_splice_tail_init(list, hctx->dispatch_list);
+ spin_unlock(hctx->dispatch_lock);
}

static inline void blk_mq_take_list_from_dispatch(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
- spin_lock(&hctx->lock);
- list_splice_init(&hctx->dispatch, list);
+ spin_lock(hctx->dispatch_lock);
+ list_splice_init(hctx->dispatch_list, list);
/*
* BUSY won't be cleared until all requests
@@ -191,7 +191,7 @@ static inline void blk_mq_take_list_from_dispatch(struct blk_mq_hw_ctx *hctx,
*/
if (!list_empty(list))
blk_mq_hctx_set_busy(hctx);
- spin_unlock(&hctx->lock);
+ spin_unlock(hctx->dispatch_lock);
}

#endif
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -22,6 +22,9 @@ struct blk_mq_hw_ctx {
unsigned long flags; /* BLK_MQ_F_* flags */
+ spinlock_t *dispatch_lock; /* protects dispatch_list */
+ struct list_head *dispatch_list; /* points at hctx->dispatch for now */
+
void *sched_data;
struct request_queue *queue;
struct blk_flush_queue *fq;
Prepare for supporting a per-request-queue dispatch list: introduce a
dispatch lock pointer and a dispatch list pointer in the hctx, so that
users of the dispatch list do not need a runtime check to find the
right lock and list.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq-debugfs.c | 10 +++++-----
 block/blk-mq.c         |  7 +++++--
 block/blk-mq.h         | 26 +++++++++++++-------------
 include/linux/blk-mq.h |  3 +++
 4 files changed, 26 insertions(+), 20 deletions(-)
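For illustration only, here is a minimal sketch of where this indirection
is headed. The blk_queue_per_q_dispatch() test and the q->dispatch_lock /
q->dispatch_list fields below are hypothetical names, not part of this
patch or of the current kernel; they stand in for whatever a follow-up
patch would introduce. The point of the sketch is that, since every user
now goes through hctx->dispatch_lock and hctx->dispatch_list, switching a
queue over to a shared per-request-queue dispatch list becomes a pure
initialization change, with no runtime check in any of the helpers above:

static void blk_mq_init_dispatch(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	if (blk_queue_per_q_dispatch(q)) {	/* hypothetical flag */
		/*
		 * All hw queues of this request queue share one
		 * lock/list pair, initialized once when the request
		 * queue itself is allocated (hypothetical fields).
		 */
		hctx->dispatch_lock = &q->dispatch_lock;
		hctx->dispatch_list = &q->dispatch_list;
	} else {
		/* per-hctx dispatch, as initialized by this patch */
		hctx->dispatch_lock = &hctx->lock;
		hctx->dispatch_list = &hctx->dispatch;

		spin_lock_init(hctx->dispatch_lock);
		INIT_LIST_HEAD(hctx->dispatch_list);
	}
}

Either way, callers such as blk_mq_add_rq_to_dispatch() stay exactly as
in this patch, which is why the extra pointer dereference is worth the
two added fields.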