
mm: Use unsigned types for fragmentation score

Message ID 20200618010319.13159-1-nigupta@nvidia.com (mailing list archive)
State New, archived
Series mm: Use unsigned types for fragmentation score

Commit Message

Nitin Gupta June 18, 2020, 1:03 a.m. UTC
Proactive compaction uses a per-node/zone "fragmentation score" which
is always in the range [0, 100], so use an unsigned type for these
scores as well as for the related constants.

Signed-off-by: Nitin Gupta <nigupta@nvidia.com>
---
 include/linux/compaction.h |  4 ++--
 kernel/sysctl.c            |  2 +-
 mm/compaction.c            | 18 +++++++++---------
 mm/vmstat.c                |  2 +-
 4 files changed, 13 insertions(+), 13 deletions(-)

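A note on the hazard that consistent unsigned types avoid: under C's
usual arithmetic conversions, when a signed and an unsigned value meet
in a comparison, the signed one is converted to unsigned first. A
minimal userspace sketch (illustration only, not from the patch):

#include <stdio.h>

int main(void)
{
	int signed_score = -1;		/* a bogus "score" */
	unsigned int limit = 100;

	/*
	 * -1 is converted to UINT_MAX before the comparison, so the
	 * range check is false even though -1 < 100 mathematically.
	 */
	if (signed_score < limit)
		printf("in range\n");
	else
		printf("out of range: -1 became UINT_MAX\n");
	return 0;
}
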
Comments

Baoquan He June 18, 2020, 1:41 p.m. UTC | #1
On 06/17/20 at 06:03pm, Nitin Gupta wrote:
> Proactive compaction uses a per-node/zone "fragmentation score" which
> is always in the range [0, 100], so use an unsigned type for these
> scores as well as for the related constants.
> 
> Signed-off-by: Nitin Gupta <nigupta@nvidia.com>
> ---
>  include/linux/compaction.h |  4 ++--
>  kernel/sysctl.c            |  2 +-
>  mm/compaction.c            | 18 +++++++++---------
>  mm/vmstat.c                |  2 +-
>  4 files changed, 13 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/compaction.h b/include/linux/compaction.h
> index 7a242d46454e..25a521d299c1 100644
> --- a/include/linux/compaction.h
> +++ b/include/linux/compaction.h
> @@ -85,13 +85,13 @@ static inline unsigned long compact_gap(unsigned int order)
>  
>  #ifdef CONFIG_COMPACTION
>  extern int sysctl_compact_memory;
> -extern int sysctl_compaction_proactiveness;
> +extern unsigned int sysctl_compaction_proactiveness;
>  extern int sysctl_compaction_handler(struct ctl_table *table, int write,
>  			void *buffer, size_t *length, loff_t *ppos);
>  extern int sysctl_extfrag_threshold;
>  extern int sysctl_compact_unevictable_allowed;
>  
> -extern int extfrag_for_order(struct zone *zone, unsigned int order);
> +extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
>  extern int fragmentation_index(struct zone *zone, unsigned int order);
>  extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
>  		unsigned int order, unsigned int alloc_flags,
> diff --git a/kernel/sysctl.c b/kernel/sysctl.c
> index 58b0a59c9769..40180cdde486 100644
> --- a/kernel/sysctl.c
> +++ b/kernel/sysctl.c
> @@ -2833,7 +2833,7 @@ static struct ctl_table vm_table[] = {
>  	{
>  		.procname	= "compaction_proactiveness",
>  		.data		= &sysctl_compaction_proactiveness,
> -		.maxlen		= sizeof(int),
> +		.maxlen		= sizeof(sysctl_compaction_proactiveness),

Patch looks good to me. Wondering why not use 'unsigned int' here,
just curious.

Nitin Gupta June 18, 2020, 2:24 p.m. UTC | #2
On 6/18/20 6:41 AM, Baoquan He wrote:
> On 06/17/20 at 06:03pm, Nitin Gupta wrote:
>> Proactive compaction uses a per-node/zone "fragmentation score" which
>> is always in the range [0, 100], so use an unsigned type for these
>> scores as well as for the related constants.
>>
>> Signed-off-by: Nitin Gupta <nigupta@nvidia.com>
>> [...]
>> diff --git a/kernel/sysctl.c b/kernel/sysctl.c
>> index 58b0a59c9769..40180cdde486 100644
>> --- a/kernel/sysctl.c
>> +++ b/kernel/sysctl.c
>> @@ -2833,7 +2833,7 @@ static struct ctl_table vm_table[] = {
>>  	{
>>  		.procname	= "compaction_proactiveness",
>>  		.data		= &sysctl_compaction_proactiveness,
>> -		.maxlen		= sizeof(int),
>> +		.maxlen		= sizeof(sysctl_compaction_proactiveness),
> 
> Patch looks good to me. Wondering why not use 'unsigned int' here,
> just curious.
> 


It's just a coding-style preference. I see the same style used for many
other sysctls too (min_free_kbytes etc.).
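
To illustrate with a simplified, hypothetical mirror of a sysctl table
entry (not the real struct ctl_table): sizeof(variable) keeps .maxlen
correct even if the variable's type is changed later, while sizeof(int)
would have to be updated by hand.

#include <stddef.h>

/* Hypothetical stand-in for the two fields relevant here. */
struct ctl_entry {
	void	*data;
	size_t	maxlen;
};

/* Was a plain int before this patch; now unsigned int. */
unsigned int my_tunable = 20;

struct ctl_entry entry = {
	.data	= &my_tunable,
	.maxlen	= sizeof(my_tunable),	/* tracks the variable's type */
	/* .maxlen = sizeof(int) still happens to match here, but it
	 * would silently go stale if the variable ever became a long. */
};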

Thanks,
Nitin
Baoquan He June 19, 2020, 12:42 p.m. UTC | #3
On 06/17/20 at 06:03pm, Nitin Gupta wrote:
> Proactive compaction uses a per-node/zone "fragmentation score" which
> is always in the range [0, 100], so use an unsigned type for these
> scores as well as for the related constants.
> 
> Signed-off-by: Nitin Gupta <nigupta@nvidia.com>

Reviewed-by: Baoquan He <bhe@redhat.com>

Patch

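One subtlety in the fragmentation_score_wmark() hunk below: once
sysctl_compaction_proactiveness is unsigned, bare 100 and 5 literals
would mix int with unsigned int inside max()/min(), whose kernel
implementations have long type-checked their operands; hence the 100U
and 5U suffixes. A userspace sketch using the classic macro shape
(approximated here, GNU C):

#include <stdio.h>

#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	/* warns when typeof(x) != typeof(y) */	\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })

unsigned int proactiveness = 20;

int main(void)
{
	/* OK: both operands are unsigned int. */
	unsigned int wmark_low = max(100U - proactiveness, 5U);

	/*
	 * max(100 - proactiveness, 5) would warn instead: typeof(5)
	 * is int, while the subtraction promotes to unsigned int.
	 */
	printf("wmark_low = %u\n", wmark_low);
	return 0;
}
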
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 7a242d46454e..25a521d299c1 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -85,13 +85,13 @@ static inline unsigned long compact_gap(unsigned int order)
 
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
-extern int sysctl_compaction_proactiveness;
+extern unsigned int sysctl_compaction_proactiveness;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
 			void *buffer, size_t *length, loff_t *ppos);
 extern int sysctl_extfrag_threshold;
 extern int sysctl_compact_unevictable_allowed;
 
-extern int extfrag_for_order(struct zone *zone, unsigned int order);
+extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
 		unsigned int order, unsigned int alloc_flags,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 58b0a59c9769..40180cdde486 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2833,7 +2833,7 @@ static struct ctl_table vm_table[] = {
 	{
 		.procname	= "compaction_proactiveness",
 		.data		= &sysctl_compaction_proactiveness,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(sysctl_compaction_proactiveness),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= SYSCTL_ZERO,
diff --git a/mm/compaction.c b/mm/compaction.c
index ac2030814edb..45fd24a0ea0b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -53,7 +53,7 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 /*
  * Fragmentation score check interval for proactive compaction purposes.
  */
-static const int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
+static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
 
 /*
  * Page order with-respect-to which proactive compaction
@@ -1890,7 +1890,7 @@ static bool kswapd_is_running(pg_data_t *pgdat)
  * ZONE_DMA32. For smaller zones, the score value remains close to zero,
  * and thus never exceeds the high threshold for proactive compaction.
  */
-static int fragmentation_score_zone(struct zone *zone)
+static unsigned int fragmentation_score_zone(struct zone *zone)
 {
 	unsigned long score;
 
@@ -1906,9 +1906,9 @@ static int fragmentation_score_zone(struct zone *zone)
  * the node's score falls below the low threshold, or one of the back-off
  * conditions is met.
  */
-static int fragmentation_score_node(pg_data_t *pgdat)
+static unsigned int fragmentation_score_node(pg_data_t *pgdat)
 {
-	unsigned long score = 0;
+	unsigned int score = 0;
 	int zoneid;
 
 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
@@ -1921,17 +1921,17 @@ static int fragmentation_score_node(pg_data_t *pgdat)
 	return score;
 }
 
-static int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
+static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
 {
-	int wmark_low;
+	unsigned int wmark_low;
 
 	/*
 	 * Cap the low watermark to avoid excessive compaction
 	 * activity in case a user sets the proactiveness tunable
 	 * close to 100 (maximum).
 	 */
-	wmark_low = max(100 - sysctl_compaction_proactiveness, 5);
-	return low ? wmark_low : min(wmark_low + 10, 100);
+	wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
+	return low ? wmark_low : min(wmark_low + 10, 100U);
 }
 
 static bool should_proactive_compact_node(pg_data_t *pgdat)
@@ -2604,7 +2604,7 @@ int sysctl_compact_memory;
  * aggressively the kernel should compact memory in the
  * background. It takes values in the range [0, 100].
  */
-int __read_mostly sysctl_compaction_proactiveness = 20;
+unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
 
 /*
  * This is the entry point for compacting all nodes via
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3e7ba8bce2ba..b1de695b826d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1079,7 +1079,7 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in
  * It is defined as the percentage of pages found in blocks of size
  * less than 1 << order. It returns values in range [0, 100].
  */
-int extfrag_for_order(struct zone *zone, unsigned int order)
+unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
 {
 	struct contig_page_info info;
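
For reference, the thresholds that the reworked
fragmentation_score_wmark() produces, recomputed in a standalone sketch
(same arithmetic, plain C):

#include <stdio.h>

static unsigned int umax(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int umin(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* Mirrors fragmentation_score_wmark(): low watermark is floored at 5. */
static unsigned int wmark(unsigned int proactiveness, int low)
{
	unsigned int wmark_low = umax(100U - proactiveness, 5U);

	return low ? wmark_low : umin(wmark_low + 10, 100U);
}

int main(void)
{
	/* Default proactiveness of 20: compact above 90, stop below 80. */
	printf("low=%u high=%u\n", wmark(20, 1), wmark(20, 0));	/* 80 90 */

	/* Proactiveness of 98: the floor of 5 kicks in. */
	printf("low=%u high=%u\n", wmark(98, 1), wmark(98, 0));	/* 5 15 */
	return 0;
}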