Message ID | 1538044984-2147-2-git-send-email-jianchao.w.wang@oracle.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | blk-mq: two fixes about updating hw queues | expand |
On Thu, Sep 27, 2018 at 06:43:03PM +0800, Jianchao Wang wrote: > blk-mq debugfs and sysfs entries need to be removed before updating > queue map, otherwise, we get get wrong result there. This patch fixes > it and remove the redundant debugfs and sysfs register/unregister > operations during __blk_mq_update_nr_hw_queues. > > Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com> > --- > block/blk-mq.c | 39 ++++++++++++--------------------------- > 1 file changed, 12 insertions(+), 27 deletions(-) > > diff --git a/block/blk-mq.c b/block/blk-mq.c > index 85a1c1a..6356455 100644 > --- a/block/blk-mq.c > +++ b/block/blk-mq.c > @@ -2137,8 +2137,6 @@ static void blk_mq_exit_hctx(struct request_queue *q, > struct blk_mq_tag_set *set, > struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) > { > - blk_mq_debugfs_unregister_hctx(hctx); > - > if (blk_mq_hw_queue_mapped(hctx)) > blk_mq_tag_idle(hctx); > > @@ -2165,6 +2163,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q, > queue_for_each_hw_ctx(q, hctx, i) { > if (i == nr_queue) > break; > + blk_mq_debugfs_unregister_hctx(hctx); > blk_mq_exit_hctx(q, set, hctx, i); > } > } > @@ -2222,8 +2221,6 @@ static int blk_mq_init_hctx(struct request_queue *q, > if (hctx->flags & BLK_MQ_F_BLOCKING) > init_srcu_struct(hctx->srcu); > > - blk_mq_debugfs_register_hctx(q, hctx); > - > return 0; > > free_fq: > @@ -2512,8 +2509,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, > int i, j; > struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; > > - blk_mq_sysfs_unregister(q); > - > /* protect against switching io scheduler */ > mutex_lock(&q->sysfs_lock); > for (i = 0; i < set->nr_hw_queues; i++) { > @@ -2561,7 +2556,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, > } > q->nr_hw_queues = i; > mutex_unlock(&q->sysfs_lock); > - blk_mq_sysfs_register(q); > } > > struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, > @@ -2659,25 +2653,6 @@ void blk_mq_free_queue(struct 
request_queue *q) > blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); > } Looks both blk_mq_sysfs_register() and blk_mq_debugfs_register_hctx() are removed from the path of blk_mq_init_allocated_queue(), so where are the two registered for a freshly new queue? Thanks, Ming
Hi Ming On 09/29/2018 10:49 AM, Ming Lei wrote: > Looks both blk_mq_sysfs_register() and blk_mq_debugfs_register_hctx() > are removed from the path of blk_mq_init_allocated_queue(), so where are > the two registered for a freshly new queue? Initially, both of them are registered by following path. __device_add_disk -> blk_register_queue if (q->mq_ops) { __blk_mq_register_dev(dev, q); blk_mq_debugfs_register(q); } blk_mq_init_allocated_queue won't do any thing. Thanks Jianchao
On Thu, Sep 27, 2018 at 06:43:03PM +0800, Jianchao Wang wrote: > blk-mq debugfs and sysfs entries need to be removed before updating > queue map, otherwise, we get get wrong result there. This patch fixes > it and remove the redundant debugfs and sysfs register/unregister > operations during __blk_mq_update_nr_hw_queues. > > Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com> > --- > block/blk-mq.c | 39 ++++++++++++--------------------------- > 1 file changed, 12 insertions(+), 27 deletions(-) > > diff --git a/block/blk-mq.c b/block/blk-mq.c > index 85a1c1a..6356455 100644 > --- a/block/blk-mq.c > +++ b/block/blk-mq.c > @@ -2137,8 +2137,6 @@ static void blk_mq_exit_hctx(struct request_queue *q, > struct blk_mq_tag_set *set, > struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) > { > - blk_mq_debugfs_unregister_hctx(hctx); > - > if (blk_mq_hw_queue_mapped(hctx)) > blk_mq_tag_idle(hctx); > > @@ -2165,6 +2163,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q, > queue_for_each_hw_ctx(q, hctx, i) { > if (i == nr_queue) > break; > + blk_mq_debugfs_unregister_hctx(hctx); > blk_mq_exit_hctx(q, set, hctx, i); > } > } > @@ -2222,8 +2221,6 @@ static int blk_mq_init_hctx(struct request_queue *q, > if (hctx->flags & BLK_MQ_F_BLOCKING) > init_srcu_struct(hctx->srcu); > > - blk_mq_debugfs_register_hctx(q, hctx); > - > return 0; > > free_fq: > @@ -2512,8 +2509,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, > int i, j; > struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; > > - blk_mq_sysfs_unregister(q); > - > /* protect against switching io scheduler */ > mutex_lock(&q->sysfs_lock); > for (i = 0; i < set->nr_hw_queues; i++) { > @@ -2561,7 +2556,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, > } > q->nr_hw_queues = i; > mutex_unlock(&q->sysfs_lock); > - blk_mq_sysfs_register(q); > } > > struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, > @@ -2659,25 +2653,6 @@ void blk_mq_free_queue(struct 
request_queue *q) > blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); > } > > -/* Basically redo blk_mq_init_queue with queue frozen */ > -static void blk_mq_queue_reinit(struct request_queue *q) > -{ > - WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); > - > - blk_mq_debugfs_unregister_hctxs(q); > - blk_mq_sysfs_unregister(q); > - > - /* > - * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe > - * we should change hctx numa_node according to the new topology (this > - * involves freeing and re-allocating memory, worth doing?) > - */ > - blk_mq_map_swqueue(q); > - > - blk_mq_sysfs_register(q); > - blk_mq_debugfs_register_hctxs(q); > -} > - > static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) > { > int i; > @@ -2987,11 +2962,21 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, > if (!blk_mq_elv_switch_none(&head, q)) > goto switch_back; > > + list_for_each_entry(q, &set->tag_list, tag_set_list) { > + blk_mq_debugfs_unregister_hctxs(q); > + blk_mq_sysfs_unregister(q); > + } > + > set->nr_hw_queues = nr_hw_queues; > blk_mq_update_queue_map(set); > list_for_each_entry(q, &set->tag_list, tag_set_list) { > blk_mq_realloc_hw_ctxs(set, q); > - blk_mq_queue_reinit(q); > + blk_mq_map_swqueue(q); > + } > + > + list_for_each_entry(q, &set->tag_list, tag_set_list) { > + blk_mq_sysfs_register(q); > + blk_mq_debugfs_register_hctxs(q); > } > > switch_back: > -- > 2.7.4 > Reviewed-by: Ming Lei <ming.lei@redhat.com> Thanks, Ming
diff --git a/block/blk-mq.c b/block/blk-mq.c index 85a1c1a..6356455 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2137,8 +2137,6 @@ static void blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { - blk_mq_debugfs_unregister_hctx(hctx); - if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_idle(hctx); @@ -2165,6 +2163,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q, queue_for_each_hw_ctx(q, hctx, i) { if (i == nr_queue) break; + blk_mq_debugfs_unregister_hctx(hctx); blk_mq_exit_hctx(q, set, hctx, i); } } @@ -2222,8 +2221,6 @@ static int blk_mq_init_hctx(struct request_queue *q, if (hctx->flags & BLK_MQ_F_BLOCKING) init_srcu_struct(hctx->srcu); - blk_mq_debugfs_register_hctx(q, hctx); - return 0; free_fq: @@ -2512,8 +2509,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, int i, j; struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; - blk_mq_sysfs_unregister(q); - /* protect against switching io scheduler */ mutex_lock(&q->sysfs_lock); for (i = 0; i < set->nr_hw_queues; i++) { @@ -2561,7 +2556,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, } q->nr_hw_queues = i; mutex_unlock(&q->sysfs_lock); - blk_mq_sysfs_register(q); } struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, @@ -2659,25 +2653,6 @@ void blk_mq_free_queue(struct request_queue *q) blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); } -/* Basically redo blk_mq_init_queue with queue frozen */ -static void blk_mq_queue_reinit(struct request_queue *q) -{ - WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); - - blk_mq_debugfs_unregister_hctxs(q); - blk_mq_sysfs_unregister(q); - - /* - * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe - * we should change hctx numa_node according to the new topology (this - * involves freeing and re-allocating memory, worth doing?) 
- */ - blk_mq_map_swqueue(q); - - blk_mq_sysfs_register(q); - blk_mq_debugfs_register_hctxs(q); -} - static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; @@ -2987,11 +2962,21 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, if (!blk_mq_elv_switch_none(&head, q)) goto switch_back; + list_for_each_entry(q, &set->tag_list, tag_set_list) { + blk_mq_debugfs_unregister_hctxs(q); + blk_mq_sysfs_unregister(q); + } + set->nr_hw_queues = nr_hw_queues; blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_realloc_hw_ctxs(set, q); - blk_mq_queue_reinit(q); + blk_mq_map_swqueue(q); + } + + list_for_each_entry(q, &set->tag_list, tag_set_list) { + blk_mq_sysfs_register(q); + blk_mq_debugfs_register_hctxs(q); } switch_back:
blk-mq debugfs and sysfs entries need to be removed before updating queue map, otherwise, we get wrong results there. This patch fixes it and removes the redundant debugfs and sysfs register/unregister operations during __blk_mq_update_nr_hw_queues. Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com> --- block/blk-mq.c | 39 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 27 deletions(-)