[v2,1/4] mm: enable __wb_calc_thresh to calculate dirty background threshold

Message ID 20240425131724.36778-2-shikemeng@huaweicloud.com (mailing list archive)
State New
Series Fix and cleanups to page-writeback

Commit Message

Kemeng Shi April 25, 2024, 1:17 p.m. UTC
Originally, __wb_calc_thresh always calculates the wb's share of the
dirty throttling threshold. By taking the wb_domain threshold from the
caller, __wb_calc_thresh can be used for both the dirty throttling and
the dirty background threshold.

This is a preparation for correcting the threshold calculation of a wb in a cgroup.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 mm/page-writeback.c | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)
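
The new parameter makes the helper usable for either limit: the caller now
passes in whichever wb_domain-wide threshold it wants apportioned to the wb.
A minimal sketch of the resulting call pattern (hypothetical call sites,
assuming a populated struct dirty_throttle_control *dtc; only the throttling
case is wired up by this patch, the background case is what the change
prepares for):

	unsigned long wb_thresh, wb_bg_thresh;

	/* Dirty throttling limit: existing behaviour, threshold now passed explicitly. */
	wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);

	/* Dirty background limit: the use case the new parameter is meant to enable. */
	wb_bg_thresh = __wb_calc_thresh(dtc, dtc->bg_thresh);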

Comments

Jan Kara May 3, 2024, 9:11 a.m. UTC | #1
On Thu 25-04-24 21:17:21, Kemeng Shi wrote:
> Originally, __wb_calc_thresh always calculates the wb's share of the
> dirty throttling threshold. By taking the wb_domain threshold from the
> caller, __wb_calc_thresh can be used for both the dirty throttling and
> the dirty background threshold.
> 
> This is a preparation for correcting the threshold calculation of a wb in a cgroup.
> 
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>

Looks good. Feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

> ---
>  mm/page-writeback.c | 33 ++++++++++++++++++---------------
>  1 file changed, 18 insertions(+), 15 deletions(-)
> 
> diff --git a/mm/page-writeback.c b/mm/page-writeback.c
> index db5769cb12fd..2a3b68aae336 100644
> --- a/mm/page-writeback.c
> +++ b/mm/page-writeback.c
> @@ -838,13 +838,15 @@ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
>  }
>  
>  /**
> - * __wb_calc_thresh - @wb's share of dirty throttling threshold
> + * __wb_calc_thresh - @wb's share of dirty threshold
>   * @dtc: dirty_throttle_context of interest
> + * @thresh: dirty throttling or dirty background threshold of wb_domain in @dtc
>   *
> - * Note that balance_dirty_pages() will only seriously take it as a hard limit
> - * when sleeping max_pause per page is not enough to keep the dirty pages under
> - * control. For example, when the device is completely stalled due to some error
> - * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
> + * Note that balance_dirty_pages() will only seriously take dirty throttling
> + * threshold as a hard limit when sleeping max_pause per page is not enough
> + * to keep the dirty pages under control. For example, when the device is
> + * completely stalled due to some error conditions, or when there are 1000
> + * dd tasks writing to a slow 10MB/s USB key.
>   * In the other normal situations, it acts more gently by throttling the tasks
>   * more (rather than completely block them) when the wb dirty pages go high.
>   *
> @@ -855,19 +857,20 @@ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
>   * The wb's share of dirty limit will be adapting to its throughput and
>   * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
>   *
> - * Return: @wb's dirty limit in pages. The term "dirty" in the context of
> - * dirty balancing includes all PG_dirty and PG_writeback pages.
> + * Return: @wb's dirty limit in pages. For dirty throttling limit, the term
> + * "dirty" in the context of dirty balancing includes all PG_dirty and
> + * PG_writeback pages.
>   */
> -static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
> +static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
> +				      unsigned long thresh)
>  {
>  	struct wb_domain *dom = dtc_dom(dtc);
> -	unsigned long thresh = dtc->thresh;
>  	u64 wb_thresh;
>  	unsigned long numerator, denominator;
>  	unsigned long wb_min_ratio, wb_max_ratio;
>  
>  	/*
> -	 * Calculate this BDI's share of the thresh ratio.
> +	 * Calculate this wb's share of the thresh ratio.
>  	 */
>  	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
>  			      &numerator, &denominator);
> @@ -887,9 +890,9 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
>  
>  unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
>  {
> -	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
> -					       .thresh = thresh };
> -	return __wb_calc_thresh(&gdtc);
> +	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
> +
> +	return __wb_calc_thresh(&gdtc, thresh);
>  }
>  
>  unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
> @@ -908,7 +911,7 @@ unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
>  	mdtc_calc_avail(&mdtc, filepages, headroom);
>  	domain_dirty_limits(&mdtc);
>  
> -	return __wb_calc_thresh(&mdtc);
> +	return __wb_calc_thresh(&mdtc, mdtc.thresh);
>  }
>  
>  /*
> @@ -1655,7 +1658,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
>  	 *   wb_position_ratio() will let the dirtier task progress
>  	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
>  	 */
> -	dtc->wb_thresh = __wb_calc_thresh(dtc);
> +	dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
>  	dtc->wb_bg_thresh = dtc->thresh ?
>  		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
>  
> -- 
> 2.30.0
>

Patch

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index db5769cb12fd..2a3b68aae336 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -838,13 +838,15 @@  static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
 }
 
 /**
- * __wb_calc_thresh - @wb's share of dirty throttling threshold
+ * __wb_calc_thresh - @wb's share of dirty threshold
  * @dtc: dirty_throttle_context of interest
+ * @thresh: dirty throttling or dirty background threshold of wb_domain in @dtc
  *
- * Note that balance_dirty_pages() will only seriously take it as a hard limit
- * when sleeping max_pause per page is not enough to keep the dirty pages under
- * control. For example, when the device is completely stalled due to some error
- * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
+ * Note that balance_dirty_pages() will only seriously take dirty throttling
+ * threshold as a hard limit when sleeping max_pause per page is not enough
+ * to keep the dirty pages under control. For example, when the device is
+ * completely stalled due to some error conditions, or when there are 1000
+ * dd tasks writing to a slow 10MB/s USB key.
  * In the other normal situations, it acts more gently by throttling the tasks
  * more (rather than completely block them) when the wb dirty pages go high.
  *
@@ -855,19 +857,20 @@  static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
  * The wb's share of dirty limit will be adapting to its throughput and
  * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
  *
- * Return: @wb's dirty limit in pages. The term "dirty" in the context of
- * dirty balancing includes all PG_dirty and PG_writeback pages.
+ * Return: @wb's dirty limit in pages. For dirty throttling limit, the term
+ * "dirty" in the context of dirty balancing includes all PG_dirty and
+ * PG_writeback pages.
  */
-static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
+static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
+				      unsigned long thresh)
 {
 	struct wb_domain *dom = dtc_dom(dtc);
-	unsigned long thresh = dtc->thresh;
 	u64 wb_thresh;
 	unsigned long numerator, denominator;
 	unsigned long wb_min_ratio, wb_max_ratio;
 
 	/*
-	 * Calculate this BDI's share of the thresh ratio.
+	 * Calculate this wb's share of the thresh ratio.
 	 */
 	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
 			      &numerator, &denominator);
@@ -887,9 +890,9 @@  static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
 
 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
 {
-	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
-					       .thresh = thresh };
-	return __wb_calc_thresh(&gdtc);
+	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
+
+	return __wb_calc_thresh(&gdtc, thresh);
 }
 
 unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
@@ -908,7 +911,7 @@  unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
 	mdtc_calc_avail(&mdtc, filepages, headroom);
 	domain_dirty_limits(&mdtc);
 
-	return __wb_calc_thresh(&mdtc);
+	return __wb_calc_thresh(&mdtc, mdtc.thresh);
 }
 
 /*
@@ -1655,7 +1658,7 @@  static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
 	 *   wb_position_ratio() will let the dirtier task progress
 	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
 	 */
-	dtc->wb_thresh = __wb_calc_thresh(dtc);
+	dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
 	dtc->wb_bg_thresh = dtc->thresh ?
 		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
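
For context, the share calculation that __wb_calc_thresh applies to whichever
threshold it is handed can be modelled in isolation: scale the domain
threshold by this wb's fraction of recent writeback completions, then bound
the result by the per-bdi min/max ratios mentioned in the kernel-doc above.
The sketch below is a simplified standalone model (plain percent ratios, no
fixed-point scaling, invented function name wb_share()), not the kernel
implementation, which takes the fraction from fprop_fraction_percpu() and the
ratios from the bdi:

#include <stdio.h>

/*
 * Simplified model of a wb's proportional share of a domain threshold:
 * distribute most of the threshold according to the wb's fraction of
 * recent writeback completions, guarantee at least min_ratio percent,
 * and never hand out more than max_ratio percent.
 */
static unsigned long wb_share(unsigned long thresh,
			      unsigned long numerator,
			      unsigned long denominator,
			      unsigned long min_ratio,	/* percent */
			      unsigned long max_ratio)	/* percent */
{
	unsigned long long wb_thresh;

	/* Portion of the threshold that is distributed proportionally. */
	wb_thresh = (unsigned long long)thresh * (100 - min_ratio) / 100;
	wb_thresh = wb_thresh * numerator / denominator;

	/* Every wb is guaranteed its min_ratio share... */
	wb_thresh += (unsigned long long)thresh * min_ratio / 100;

	/* ...and is capped at its max_ratio share. */
	if (wb_thresh > (unsigned long long)thresh * max_ratio / 100)
		wb_thresh = (unsigned long long)thresh * max_ratio / 100;

	return wb_thresh;
}

int main(void)
{
	/* A wb that did 1/4 of recent completions, domain threshold 100000 pages. */
	printf("%lu\n", wb_share(100000, 1, 4, 0, 100));	/* prints 25000 */
	return 0;
}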