
[1/3] mm/page-writeback.c: Rename BANDWIDTH_INTERVAL to UPDATE_INTERVAL

Message ID 20241002130004.69010-2-yizhou.tang@shopee.com (mailing list archive)
State Not Applicable, archived
Series Cleanup some writeback codes

Commit Message

Tang Yizhou Oct. 2, 2024, 1 p.m. UTC
From: Tang Yizhou <yizhou.tang@shopee.com>

The name of the BANDWIDTH_INTERVAL macro is misleading, as it is not
only used in the bandwidth update functions wb_update_bandwidth() and
__wb_update_bandwidth(), but also in the dirty limit update function
domain_update_dirty_limit().

Rename BANDWIDTH_INTERVAL to UPDATE_INTERVAL to make things clear.

This patch doesn't introduce any behavioral changes.

Signed-off-by: Tang Yizhou <yizhou.tang@shopee.com>
---
 mm/page-writeback.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
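
For reference, the value being renamed is an interval expressed in jiffies: max(HZ/5, 1) is roughly 200 ms, with a floor of one jiffy so the interval can never round down to zero. A minimal standalone C sketch of the arithmetic (userspace code with a simplified max(); the HZ values are chosen only for illustration):

#include <stdio.h>

/* Simplified stand-in for the kernel's max() macro. */
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Typical config: HZ = 250 -> 250/5 = 50 jiffies = 200 ms. */
	int hz = 250;
	int interval = max(hz / 5, 1);
	printf("HZ=%d -> %d jiffies (%d ms)\n", hz, interval, interval * 1000 / hz);

	/* A hypothetical HZ below 5 would yield 0 jiffies without the max(). */
	hz = 4;
	interval = max(hz / 5, 1);
	printf("HZ=%d -> %d jiffy\n", hz, interval);
	return 0;
}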

Comments

Jan Kara Oct. 3, 2024, 1:01 p.m. UTC | #1
On Wed 02-10-24 21:00:02, Tang Yizhou wrote:
> From: Tang Yizhou <yizhou.tang@shopee.com>
> 
> The name of the BANDWIDTH_INTERVAL macro is misleading, as it is not
> only used in the bandwidth update functions wb_update_bandwidth() and
> __wb_update_bandwidth(), but also in the dirty limit update function
> domain_update_dirty_limit().
> 
> Rename BANDWIDTH_INTERVAL to UPDATE_INTERVAL to make things clear.
> 
> This patch doesn't introduce any behavioral changes.
> 
> Signed-off-by: Tang Yizhou <yizhou.tang@shopee.com>

Umm, I agree BANDWIDTH_INTERVAL may be confusing but UPDATE_INTERVAL does
not seem much better to be honest. I actually have a hard time coming up with
a more descriptive name so what if we settled on updating the comment only
instead of renaming to something not much better?

								Honza
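
The comment-only alternative suggested above would look roughly like the sketch below (an illustration, not a patch posted in this thread): keep the existing name and widen the comment to cover both users.

/*
 * Both the write bandwidth estimate and the domain dirty limits are
 * refreshed at most once per this interval (about 200 ms); the name
 * predates its reuse by domain_update_dirty_limit().
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)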

Tang Yizhou Oct. 6, 2024, 12:41 p.m. UTC | #2
On Thu, Oct 3, 2024 at 9:01 PM Jan Kara <jack@suse.cz> wrote:
>
> On Wed 02-10-24 21:00:02, Tang Yizhou wrote:
> > From: Tang Yizhou <yizhou.tang@shopee.com>
> >
> > The name of the BANDWIDTH_INTERVAL macro is misleading, as it is not
> > only used in the bandwidth update functions wb_update_bandwidth() and
> > __wb_update_bandwidth(), but also in the dirty limit update function
> > domain_update_dirty_limit().
> >
> > Rename BANDWIDTH_INTERVAL to UPDATE_INTERVAL to make things clear.
> >
> > This patch doesn't introduce any behavioral changes.
> >
> > Signed-off-by: Tang Yizhou <yizhou.tang@shopee.com>
>
> Umm, I agree BANDWIDTH_INTERVAL may be confusing but UPDATE_INTERVAL does
> not seem much better to be honest. I actually have a hard time coming up with
> a more descriptive name so what if we settled on updating the comment only
> instead of renaming to something not much better?
>
>                                                                 Honza

Thank you for your review. I agree that UPDATE_INTERVAL is not a good
name. How about
renaming it to BW_DIRTYLIMIT_INTERVAL?

Yi

Jan Kara Oct. 7, 2024, 4:23 p.m. UTC | #3
On Sun 06-10-24 20:41:11, Tang Yizhou wrote:
> On Thu, Oct 3, 2024 at 9:01 PM Jan Kara <jack@suse.cz> wrote:
> >
> > On Wed 02-10-24 21:00:02, Tang Yizhou wrote:
> > > From: Tang Yizhou <yizhou.tang@shopee.com>
> > >
> > > The name of the BANDWIDTH_INTERVAL macro is misleading, as it is not
> > > only used in the bandwidth update functions wb_update_bandwidth() and
> > > __wb_update_bandwidth(), but also in the dirty limit update function
> > > domain_update_dirty_limit().
> > >
> > > Rename BANDWIDTH_INTERVAL to UPDATE_INTERVAL to make things clear.
> > >
> > > This patch doesn't introduce any behavioral changes.
> > >
> > > Signed-off-by: Tang Yizhou <yizhou.tang@shopee.com>
> >
> > Umm, I agree BANDWIDTH_INTERVAL may be confusing but UPDATE_INTERVAL does
> > not seem much better to be honest. I actually have a hard time coming up with
> > a more descriptive name so what if we settled on updating the comment only
> > instead of renaming to something not much better?
> >
> >                                                                 Honza
> 
> Thank you for your review. I agree that UPDATE_INTERVAL is not a good
> name. How about
> renaming it to BW_DIRTYLIMIT_INTERVAL?

Maybe WB_STAT_INTERVAL? Because it is the interval in which we maintain
statistics about writeback behavior.

								Honza

Tang Yizhou Oct. 8, 2024, 2:14 p.m. UTC | #4
On Tue, Oct 8, 2024 at 12:23 AM Jan Kara <jack@suse.cz> wrote:
>
> On Sun 06-10-24 20:41:11, Tang Yizhou wrote:
> > On Thu, Oct 3, 2024 at 9:01 PM Jan Kara <jack@suse.cz> wrote:
> > >
> > > On Wed 02-10-24 21:00:02, Tang Yizhou wrote:
> > > > From: Tang Yizhou <yizhou.tang@shopee.com>
> > > >
> > > > The name of the BANDWIDTH_INTERVAL macro is misleading, as it is not
> > > > only used in the bandwidth update functions wb_update_bandwidth() and
> > > > __wb_update_bandwidth(), but also in the dirty limit update function
> > > > domain_update_dirty_limit().
> > > >
> > > > Rename BANDWIDTH_INTERVAL to UPDATE_INTERVAL to make things clear.
> > > >
> > > > This patch doesn't introduce any behavioral changes.
> > > >
> > > > Signed-off-by: Tang Yizhou <yizhou.tang@shopee.com>
> > >
> > > Umm, I agree BANDWIDTH_INTERVAL may be confusing but UPDATE_INTERVAL does
> > > not seem much better to be honest. I actually have a hard time coming up with
> > > a more descriptive name so what if we settled on updating the comment only
> > > instead of renaming to something not much better?
> > >
> > >                                                                 Honza
> >
> > Thank you for your review. I agree that UPDATE_INTERVAL is not a good
> > name. How about
> > renaming it to BW_DIRTYLIMIT_INTERVAL?
>
> Maybe WB_STAT_INTERVAL? Because it is the interval in which we maintain
> statistics about writeback behavior.
>

I don't think this is a good name, as it suggests a relation to enum
wb_stat_item, but bandwidth and dirty limit are not in wb_stat_item.

Yi
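
For context on this objection, enum wb_stat_item (declared in include/linux/backing-dev-defs.h) holds the per-writeback page statistics counters, roughly as sketched below for current kernels, so a WB_STAT_* name would point readers at those counters rather than at the bandwidth estimate or the dirty limits.

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};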

Jan Kara Oct. 9, 2024, 2:55 p.m. UTC | #5
On Tue 08-10-24 22:14:16, Tang Yizhou wrote:
> On Tue, Oct 8, 2024 at 12:23 AM Jan Kara <jack@suse.cz> wrote:
> >
> > On Sun 06-10-24 20:41:11, Tang Yizhou wrote:
> > > On Thu, Oct 3, 2024 at 9:01 PM Jan Kara <jack@suse.cz> wrote:
> > > >
> > > > On Wed 02-10-24 21:00:02, Tang Yizhou wrote:
> > > > > From: Tang Yizhou <yizhou.tang@shopee.com>
> > > > >
> > > > > The name of the BANDWIDTH_INTERVAL macro is misleading, as it is not
> > > > > only used in the bandwidth update functions wb_update_bandwidth() and
> > > > > __wb_update_bandwidth(), but also in the dirty limit update function
> > > > > domain_update_dirty_limit().
> > > > >
> > > > > Rename BANDWIDTH_INTERVAL to UPDATE_INTERVAL to make things clear.
> > > > >
> > > > > This patch doesn't introduce any behavioral changes.
> > > > >
> > > > > Signed-off-by: Tang Yizhou <yizhou.tang@shopee.com>
> > > >
> > > > Umm, I agree BANDWIDTH_INTERVAL may be confusing but UPDATE_INTERVAL does
> > > > not seem much better to be honest. I actually have a hard time coming up with
> > > > a more descriptive name so what if we settled on updating the comment only
> > > > instead of renaming to something not much better?
> > > >
> > > >                                                                 Honza
> > >
> > > Thank you for your review. I agree that UPDATE_INTERVAL is not a good
> > > name. How about
> > > renaming it to BW_DIRTYLIMIT_INTERVAL?
> >
> > Maybe WB_STAT_INTERVAL? Because it is the interval in which we maintain
> > statistics about writeback behavior.
> >
> 
> I don't think this is a good name, as it suggests a relation to enum
> wb_stat_item, but bandwidth and dirty limit are not in wb_stat_item.

OK, so how about keeping BANDWIDTH_INTERVAL as is and adding
DIRTY_LIMIT_INTERVAL with the same value? There's nothing which would
strictly tie them to the same value.

								Honza
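
A sketch of this two-macro alternative, assuming both keep the current 200 ms value (illustrative only; no such patch was posted in this thread):

/* Estimate write bandwidth at 200ms intervals. */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

/*
 * Refresh the domain dirty limits at the same cadence. Kept as a
 * separate macro because nothing requires the two values to match.
 */
#define DIRTY_LIMIT_INTERVAL	max(HZ/5, 1)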

Tang Yizhou Oct. 10, 2024, 3:26 a.m. UTC | #6
On Wed, Oct 9, 2024 at 10:55 PM Jan Kara <jack@suse.cz> wrote:
>
> On Tue 08-10-24 22:14:16, Tang Yizhou wrote:
> > On Tue, Oct 8, 2024 at 12:23 AM Jan Kara <jack@suse.cz> wrote:
> > >
> > > On Sun 06-10-24 20:41:11, Tang Yizhou wrote:
> > > > On Thu, Oct 3, 2024 at 9:01 PM Jan Kara <jack@suse.cz> wrote:
> > > > >
> > > > > On Wed 02-10-24 21:00:02, Tang Yizhou wrote:
> > > > > > From: Tang Yizhou <yizhou.tang@shopee.com>
> > > > > >
> > > > > > The name of the BANDWIDTH_INTERVAL macro is misleading, as it is not
> > > > > > only used in the bandwidth update functions wb_update_bandwidth() and
> > > > > > __wb_update_bandwidth(), but also in the dirty limit update function
> > > > > > domain_update_dirty_limit().
> > > > > >
> > > > > > Rename BANDWIDTH_INTERVAL to UPDATE_INTERVAL to make things clear.
> > > > > >
> > > > > > This patch doesn't introduce any behavioral changes.
> > > > > >
> > > > > > Signed-off-by: Tang Yizhou <yizhou.tang@shopee.com>
> > > > >
> > > > > Umm, I agree BANDWIDTH_INTERVAL may be confusing but UPDATE_INTERVAL does
> > > > > not seem much better to be honest. I actually have a hard time coming up with
> > > > > a more descriptive name so what if we settled on updating the comment only
> > > > > instead of renaming to something not much better?
> > > > >
> > > > >                                                                 Honza
> > > >
> > > > Thank you for your review. I agree that UPDATE_INTERVAL is not a good
> > > > name. How about
> > > > renaming it to BW_DIRTYLIMIT_INTERVAL?
> > >
> > > Maybe WB_STAT_INTERVAL? Because it is the interval in which we maintain
> > > statistics about writeback behavior.
> > >
> >
> > I don't think this is a good name, as it suggests a relation to enum
> > wb_stat_item, but bandwidth and dirty limit are not in wb_stat_item.
>
> OK, so how about keeping BANDWIDTH_INTERVAL as is and adding
> DIRTY_LIMIT_INTERVAL with the same value? There's nothing which would
> strictly tie them to the same value.
>

Good idea, but this patch has already been merged. The next time any
writeback-related code needs to be modified, I will update this part
as well.

Yi


Patch

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index fcd4c1439cb9..a848e7f0719d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -54,9 +54,9 @@ 
 #define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
 
 /*
- * Estimate write bandwidth at 200ms intervals.
+ * Estimate write bandwidth or update dirty limit at 200ms intervals.
  */
-#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
+#define UPDATE_INTERVAL		max(HZ/5, 1)
 
 #define RATELIMIT_CALC_SHIFT	10
 
@@ -1331,11 +1331,11 @@  static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
 	/*
 	 * check locklessly first to optimize away locking for the most time
 	 */
-	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
+	if (time_before(now, dom->dirty_limit_tstamp + UPDATE_INTERVAL))
 		return;
 
 	spin_lock(&dom->lock);
-	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
+	if (time_after_eq(now, dom->dirty_limit_tstamp + UPDATE_INTERVAL)) {
 		update_dirty_limit(dtc);
 		dom->dirty_limit_tstamp = now;
 	}
@@ -1928,7 +1928,7 @@  static int balance_dirty_pages(struct bdi_writeback *wb,
 		wb->dirty_exceeded = gdtc->dirty_exceeded ||
 				     (mdtc && mdtc->dirty_exceeded);
 		if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
-					   BANDWIDTH_INTERVAL))
+					   UPDATE_INTERVAL))
 			__wb_update_bandwidth(gdtc, mdtc, true);
 
 		/* throttle according to the chosen dtc */
@@ -2705,7 +2705,7 @@  int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	 * writeback bandwidth is updated once in a while.
 	 */
 	if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
-				   BANDWIDTH_INTERVAL))
+				   UPDATE_INTERVAL))
 		wb_update_bandwidth(wb);
 	return ret;
 }
@@ -3057,14 +3057,14 @@  static void wb_inode_writeback_end(struct bdi_writeback *wb)
 	atomic_dec(&wb->writeback_inodes);
 	/*
 	 * Make sure estimate of writeback throughput gets updated after
-	 * writeback completed. We delay the update by BANDWIDTH_INTERVAL
+	 * writeback completed. We delay the update by UPDATE_INTERVAL
 	 * (which is the interval other bandwidth updates use for batching) so
 	 * that if multiple inodes end writeback at a similar time, they get
 	 * batched into one bandwidth update.
 	 */
 	spin_lock_irqsave(&wb->work_lock, flags);
 	if (test_bit(WB_registered, &wb->state))
-		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+		queue_delayed_work(bdi_wq, &wb->bw_dwork, UPDATE_INTERVAL);
 	spin_unlock_irqrestore(&wb->work_lock, flags);
 }
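
The domain_update_dirty_limit() hunk above relies on a common rate-limiting idiom: check a timestamp locklessly, return early in the common case, then re-check under the lock before doing the update, so only one caller per interval actually refreshes the state. Below is a minimal userspace sketch of the same idiom; the names, the pthread mutex and the one-second interval are illustrative, and the kernel additionally uses time_before()/time_after_eq() to cope with jiffies wraparound.

#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static time_t last_update;                /* written only under lock */
#define UPDATE_INTERVAL_SECS 1            /* stand-in for max(HZ/5, 1) jiffies */

static void maybe_update(void (*do_update)(void))
{
	time_t now = time(NULL);

	/*
	 * Lockless fast path: most callers see a fresh timestamp and bail
	 * out. The unlocked read is a tolerated race, mirroring the
	 * kernel's lockless check; a stricter program would use an
	 * atomic load here.
	 */
	if (now < last_update + UPDATE_INTERVAL_SECS)
		return;

	pthread_mutex_lock(&lock);
	/* Re-check: another thread may have refreshed while we waited. */
	if (now >= last_update + UPDATE_INTERVAL_SECS) {
		do_update();
		last_update = now;
	}
	pthread_mutex_unlock(&lock);
}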