Message ID | 20210329152622.173035-10-ming.lei@redhat.com (mailing list archive) |
---|---|
State | Superseded, archived |
Delegated to: | Mike Snitzer |
Headers | show |
Series | block: support bio based io polling | expand |
On 3/29/21 11:26 PM, Ming Lei wrote: > Limit at most 8 queues are polled in each blk_pull(), avoid to > add extra latency when queue depth is high. > > Signed-off-by: Ming Lei <ming.lei@redhat.com> Reviewed-by: Jeffle Xu <jefflexu@linux.alibaba.com> > --- > block/blk-mq.c | 73 ++++++++++++++++++++++++++++++++++++-------------- > 1 file changed, 53 insertions(+), 20 deletions(-) > > diff --git a/block/blk-mq.c b/block/blk-mq.c > index b65f2c170fb0..414f5d99d9de 100644 > --- a/block/blk-mq.c > +++ b/block/blk-mq.c > @@ -3865,32 +3865,31 @@ static inline int blk_mq_poll_hctx(struct request_queue *q, > return ret; > } > > -static int blk_mq_poll_io(struct bio *bio) > +#define POLL_HCTX_MAX_CNT 8 > + > +static bool blk_add_unique_hctx(struct blk_mq_hw_ctx **data, int *cnt, > + struct blk_mq_hw_ctx *hctx) > { > - struct request_queue *q = bio->bi_bdev->bd_disk->queue; > - blk_qc_t cookie = bio_get_private_data(bio); > - int ret = 0; > + int i; > > - if (!bio_flagged(bio, BIO_DONE) && blk_qc_t_valid(cookie)) { > - struct blk_mq_hw_ctx *hctx = > - q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; > + for (i = 0; i < *cnt; i++) { > + if (data[i] == hctx) > + goto exit; > + } > > - ret += blk_mq_poll_hctx(q, hctx); > + if (i < POLL_HCTX_MAX_CNT) { > + data[i] = hctx; > + (*cnt)++; > } > - return ret; > + exit: > + return *cnt == POLL_HCTX_MAX_CNT; > } > > -static int blk_bio_poll_and_end_io(struct bio_grp_list *grps) > +static void blk_build_poll_queues(struct bio_grp_list *grps, > + struct blk_mq_hw_ctx **data, int *cnt) > { > - int ret = 0; > int i; > > - /* > - * Poll hw queue first. > - * > - * TODO: limit max poll times and make sure to not poll same > - * hw queue one more time. 
> - */ > for (i = 0; i < grps->nr_grps; i++) { > struct bio_grp_list_data *grp = &grps->head[i]; > struct bio *bio; > @@ -3898,11 +3897,29 @@ static int blk_bio_poll_and_end_io(struct bio_grp_list *grps) > if (bio_grp_list_grp_empty(grp)) > continue; > > - for (bio = grp->list.head; bio; bio = bio->bi_poll) > - ret += blk_mq_poll_io(bio); > + for (bio = grp->list.head; bio; bio = bio->bi_poll) { > + blk_qc_t cookie; > + struct blk_mq_hw_ctx *hctx; > + struct request_queue *q; > + > + if (bio_flagged(bio, BIO_DONE)) > + continue; > + cookie = bio_get_private_data(bio); > + if (!blk_qc_t_valid(cookie)) > + continue; > + > + q = bio->bi_bdev->bd_disk->queue; > + hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; > + if (blk_add_unique_hctx(data, cnt, hctx)) > + return; > + } > } > +} > + > +static void blk_bio_poll_reap_ios(struct bio_grp_list *grps) > +{ > + int i; > > - /* reap bios */ > for (i = 0; i < grps->nr_grps; i++) { > struct bio_grp_list_data *grp = &grps->head[i]; > struct bio *bio; > @@ -3927,6 +3944,22 @@ static int blk_bio_poll_and_end_io(struct bio_grp_list *grps) > } > __bio_grp_list_merge(&grp->list, &bl); > } > +} > + > +static int blk_bio_poll_and_end_io(struct bio_grp_list *grps) > +{ > + int ret = 0; > + int i; > + struct blk_mq_hw_ctx *hctx[POLL_HCTX_MAX_CNT]; > + int cnt = 0; > + > + blk_build_poll_queues(grps, hctx, &cnt); > + > + for (i = 0; i < cnt; i++) > + ret += blk_mq_poll_hctx(hctx[i]->queue, hctx[i]); > + > + blk_bio_poll_reap_ios(grps); > + > return ret; > } > >
diff --git a/block/blk-mq.c b/block/blk-mq.c index b65f2c170fb0..414f5d99d9de 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3865,32 +3865,31 @@ static inline int blk_mq_poll_hctx(struct request_queue *q, return ret; } -static int blk_mq_poll_io(struct bio *bio) +#define POLL_HCTX_MAX_CNT 8 + +static bool blk_add_unique_hctx(struct blk_mq_hw_ctx **data, int *cnt, + struct blk_mq_hw_ctx *hctx) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; - blk_qc_t cookie = bio_get_private_data(bio); - int ret = 0; + int i; - if (!bio_flagged(bio, BIO_DONE) && blk_qc_t_valid(cookie)) { - struct blk_mq_hw_ctx *hctx = - q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; + for (i = 0; i < *cnt; i++) { + if (data[i] == hctx) + goto exit; + } - ret += blk_mq_poll_hctx(q, hctx); + if (i < POLL_HCTX_MAX_CNT) { + data[i] = hctx; + (*cnt)++; } - return ret; + exit: + return *cnt == POLL_HCTX_MAX_CNT; } -static int blk_bio_poll_and_end_io(struct bio_grp_list *grps) +static void blk_build_poll_queues(struct bio_grp_list *grps, + struct blk_mq_hw_ctx **data, int *cnt) { - int ret = 0; int i; - /* - * Poll hw queue first. - * - * TODO: limit max poll times and make sure to not poll same - * hw queue one more time. 
- */ for (i = 0; i < grps->nr_grps; i++) { struct bio_grp_list_data *grp = &grps->head[i]; struct bio *bio; @@ -3898,11 +3897,29 @@ static int blk_bio_poll_and_end_io(struct bio_grp_list *grps) if (bio_grp_list_grp_empty(grp)) continue; - for (bio = grp->list.head; bio; bio = bio->bi_poll) - ret += blk_mq_poll_io(bio); + for (bio = grp->list.head; bio; bio = bio->bi_poll) { + blk_qc_t cookie; + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + + if (bio_flagged(bio, BIO_DONE)) + continue; + cookie = bio_get_private_data(bio); + if (!blk_qc_t_valid(cookie)) + continue; + + q = bio->bi_bdev->bd_disk->queue; + hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; + if (blk_add_unique_hctx(data, cnt, hctx)) + return; + } } +} + +static void blk_bio_poll_reap_ios(struct bio_grp_list *grps) +{ + int i; - /* reap bios */ for (i = 0; i < grps->nr_grps; i++) { struct bio_grp_list_data *grp = &grps->head[i]; struct bio *bio; @@ -3927,6 +3944,22 @@ static int blk_bio_poll_and_end_io(struct bio_grp_list *grps) } __bio_grp_list_merge(&grp->list, &bl); } +} + +static int blk_bio_poll_and_end_io(struct bio_grp_list *grps) +{ + int ret = 0; + int i; + struct blk_mq_hw_ctx *hctx[POLL_HCTX_MAX_CNT]; + int cnt = 0; + + blk_build_poll_queues(grps, hctx, &cnt); + + for (i = 0; i < cnt; i++) + ret += blk_mq_poll_hctx(hctx[i]->queue, hctx[i]); + + blk_bio_poll_reap_ios(grps); + return ret; }
Limit the number of hw queues polled in each blk_poll() to at most 8, to avoid adding extra latency when queue depth is high. Signed-off-by: Ming Lei <ming.lei@redhat.com> --- block/blk-mq.c | 73 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 20 deletions(-)