Message ID | 20200428164434.1517-3-johannes.thumshirn@wdc.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | block: drive-by cleanups for block cgroup | expand |
Looks good: Reviewed-by: Christoph Hellwig <hch@lst.de> Follow up comments below: > + rcu_read_lock(); > + > + if (!bio->bi_blkg) { > + char b[BDEVNAME_SIZE]; > + > + WARN_ONCE(1, > + "no blkg associated for bio on block-device: %s\n", > + bio_devname(bio, b)); > + bio_associate_blkg(bio); > + } > + > + blkg = bio->bi_blkg; We now always assign a bi_blkg, so as a follow on patch we should probab;y remove this check and assign blkg at the time of declaration. > + > + throtl = blk_throtl_bio(q, blkg, bio); > + > + if (!throtl) { The empty line hurts my feelings :) > + struct blkg_iostat_set *bis; > + int rwd, cpu; > + > + if (op_is_discard(bio->bi_opf)) > + rwd = BLKG_IOSTAT_DISCARD; > + else if (op_is_write(bio->bi_opf)) > + rwd = BLKG_IOSTAT_WRITE; > + else > + rwd = BLKG_IOSTAT_READ; > + > + cpu = get_cpu(); > + bis = per_cpu_ptr(blkg->iostat_cpu, cpu); > + u64_stats_update_begin(&bis->sync); > + > + /* > + * If the bio is flagged with BIO_QUEUE_ENTERED it means this > + * is a split bio and we would have already accounted for the > + * size of the bio. > + */ > + if (!bio_flagged(bio, BIO_QUEUE_ENTERED)) > + bis->cur.bytes[rwd] += bio->bi_iter.bi_size; > + bis->cur.ios[rwd]++; > + > + u64_stats_update_end(&bis->sync); > + if (cgroup_subsys_on_dfl(io_cgrp_subsys)) > + cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu); > + put_cpu(); As-is this will clash with my BIO_QUEUE_ENTERED cleanup. > @@ -666,6 +609,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg) > } > } > > +bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio); It might be worth to just throw a IS_ENABLED(CONFIG_BLK_CGROUP) into the caller and avoid the need for a stub in the header.
On 29/04/2020 09:25, Christoph Hellwig wrote:
> As-is this will clash with my BIO_QUEUE_ENTERED cleanup.
Bah right, I'll re-base once Jens has applied your series.
On 29/04/2020 09:25, Christoph Hellwig wrote: > Looks good: > > Reviewed-by: Christoph Hellwig <hch@lst.de> > > Follow up comments below: > >> + rcu_read_lock(); >> + >> + if (!bio->bi_blkg) { >> + char b[BDEVNAME_SIZE]; >> + >> + WARN_ONCE(1, >> + "no blkg associated for bio on block-device: %s\n", >> + bio_devname(bio, b)); >> + bio_associate_blkg(bio); >> + } >> + >> + blkg = bio->bi_blkg; > > We now always assign a bi_blkg, so as a follow on patch we should > probab;y remove this check and assign blkg at the time of declaration. But then blkcg_bio_issue_check() can still be called with a bio->bi_blkg being NULL. What am I missing? >> + >> + throtl = blk_throtl_bio(q, blkg, bio); >> + >> + if (!throtl) { > > The empty line hurts my feelings :) Hehe, fixed. > >> + struct blkg_iostat_set *bis; >> + int rwd, cpu; >> + >> + if (op_is_discard(bio->bi_opf)) >> + rwd = BLKG_IOSTAT_DISCARD; >> + else if (op_is_write(bio->bi_opf)) >> + rwd = BLKG_IOSTAT_WRITE; >> + else >> + rwd = BLKG_IOSTAT_READ; >> + >> + cpu = get_cpu(); >> + bis = per_cpu_ptr(blkg->iostat_cpu, cpu); >> + u64_stats_update_begin(&bis->sync); >> + >> + /* >> + * If the bio is flagged with BIO_QUEUE_ENTERED it means this >> + * is a split bio and we would have already accounted for the >> + * size of the bio. >> + */ >> + if (!bio_flagged(bio, BIO_QUEUE_ENTERED)) >> + bis->cur.bytes[rwd] += bio->bi_iter.bi_size; >> + bis->cur.ios[rwd]++; >> + >> + u64_stats_update_end(&bis->sync); >> + if (cgroup_subsys_on_dfl(io_cgrp_subsys)) >> + cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu); >> + put_cpu(); > > As-is this will clash with my BIO_QUEUE_ENTERED cleanup. > >> @@ -666,6 +609,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg) >> } >> } >> >> +bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio); > > It might be worth to just throw a IS_ENABLED(CONFIG_BLK_CGROUP) into > the caller and avoid the need for a stub in the header. 
> Apparently constant propagation doesn't work the way I thought it does: diff --git a/block/blk-core.c b/block/blk-core.c index 7f11560bfddb..4111fd759e37 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -988,7 +988,7 @@ generic_make_request_checks(struct bio *bio) if (unlikely(!current->io_context)) create_task_io_context(current, GFP_ATOMIC, q->node); - if (!blkcg_bio_issue_check(q, bio)) + if (IS_ENABLED(CONFIG_BLK_CGROUP) && !blkcg_bio_issue_check(q, bio)) return false; if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { block/blk-core.c: In function ‘generic_make_request_checks’: block/blk-core.c:991:40: error: implicit declaration of function ‘blkcg_bio_issue_check’; did you mean ‘blkcg_bio_issue_init’? [-Werror=implicit-function-declaration] 991 | if (IS_ENABLED(CONFIG_BLK_CGROUP) && !blkcg_bio_issue_check(q, bio)) | ^~~~~~~~~~~~~~~~~~~~~ | blkcg_bio_issue_init cc1: some warnings being treated as errors
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index c5dc833212e1..9003c76124e8 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1706,6 +1706,62 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) set_notify_resume(current); } +bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) +{ + struct blkcg_gq *blkg; + bool throtl = false; + + rcu_read_lock(); + + if (!bio->bi_blkg) { + char b[BDEVNAME_SIZE]; + + WARN_ONCE(1, + "no blkg associated for bio on block-device: %s\n", + bio_devname(bio, b)); + bio_associate_blkg(bio); + } + + blkg = bio->bi_blkg; + + throtl = blk_throtl_bio(q, blkg, bio); + + if (!throtl) { + struct blkg_iostat_set *bis; + int rwd, cpu; + + if (op_is_discard(bio->bi_opf)) + rwd = BLKG_IOSTAT_DISCARD; + else if (op_is_write(bio->bi_opf)) + rwd = BLKG_IOSTAT_WRITE; + else + rwd = BLKG_IOSTAT_READ; + + cpu = get_cpu(); + bis = per_cpu_ptr(blkg->iostat_cpu, cpu); + u64_stats_update_begin(&bis->sync); + + /* + * If the bio is flagged with BIO_QUEUE_ENTERED it means this + * is a split bio and we would have already accounted for the + * size of the bio. 
+ */ + if (!bio_flagged(bio, BIO_QUEUE_ENTERED)) + bis->cur.bytes[rwd] += bio->bi_iter.bi_size; + bis->cur.ios[rwd]++; + + u64_stats_update_end(&bis->sync); + if (cgroup_subsys_on_dfl(io_cgrp_subsys)) + cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu); + put_cpu(); + } + + blkcg_bio_issue_init(bio); + + rcu_read_unlock(); + return !throtl; +} + /** * blkcg_add_delay - add delay to this blkg * @blkg: blkg of interest diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 333885133b1f..b356d4eed08d 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -559,63 +559,6 @@ static inline void blkcg_bio_issue_init(struct bio *bio) bio_issue_init(&bio->bi_issue, bio_sectors(bio)); } -static inline bool blkcg_bio_issue_check(struct request_queue *q, - struct bio *bio) -{ - struct blkcg_gq *blkg; - bool throtl = false; - - rcu_read_lock(); - - if (!bio->bi_blkg) { - char b[BDEVNAME_SIZE]; - - WARN_ONCE(1, - "no blkg associated for bio on block-device: %s\n", - bio_devname(bio, b)); - bio_associate_blkg(bio); - } - - blkg = bio->bi_blkg; - - throtl = blk_throtl_bio(q, blkg, bio); - - if (!throtl) { - struct blkg_iostat_set *bis; - int rwd, cpu; - - if (op_is_discard(bio->bi_opf)) - rwd = BLKG_IOSTAT_DISCARD; - else if (op_is_write(bio->bi_opf)) - rwd = BLKG_IOSTAT_WRITE; - else - rwd = BLKG_IOSTAT_READ; - - cpu = get_cpu(); - bis = per_cpu_ptr(blkg->iostat_cpu, cpu); - u64_stats_update_begin(&bis->sync); - - /* - * If the bio is flagged with BIO_QUEUE_ENTERED it means this - * is a split bio and we would have already accounted for the - * size of the bio. 
- */ - if (!bio_flagged(bio, BIO_QUEUE_ENTERED)) - bis->cur.bytes[rwd] += bio->bi_iter.bi_size; - bis->cur.ios[rwd]++; - - u64_stats_update_end(&bis->sync); - if (cgroup_subsys_on_dfl(io_cgrp_subsys)) - cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu); - put_cpu(); - } - - blkcg_bio_issue_init(bio); - - rcu_read_unlock(); - return !throtl; -} - static inline void blkcg_use_delay(struct blkcg_gq *blkg) { if (atomic_add_return(1, &blkg->use_delay) == 1) @@ -666,6 +609,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg) } } +bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio); void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); void blkcg_maybe_throttle_current(void);
blkcg_bio_issue_check() is way too big to be an inline function, move it to the other blkcg related functions in block/blk-cgroup.c. According to the bloat-o-meter this brings its sole caller generic_make_request_checks() down from 2417 to 1881 bytes (~22%). $ ./scripts/bloat-o-meter -t vmlinux.old vmlinux add/remove: 1/0 grow/shrink: 1/1 up/down: 667/-539 (128) Function old new delta blkcg_bio_issue_check - 664 +664 generic_make_request_checks.cold 45 48 +3 generic_make_request_checks 2372 1833 -539 Total: Before=9624970, After=9625098, chg +0.00% Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> --- block/blk-cgroup.c | 56 ++++++++++++++++++++++++++++++++++++ include/linux/blk-cgroup.h | 58 +------------------------------------- 2 files changed, 57 insertions(+), 57 deletions(-)