diff mbox series

[v3,1/7] hugetlb: code clean for hugetlb_hstate_alloc_pages

Message ID 20240102131249.76622-2-gang.li@linux.dev (mailing list archive)
State New
Headers show
Series hugetlb: parallelize hugetlb page init on boot | expand

Commit Message

Gang Li Jan. 2, 2024, 1:12 p.m. UTC
The readability of `hugetlb_hstate_alloc_pages` is poor. By cleaning the
code, its readability can be improved, facilitating future modifications.

This patch extracts two functions to reduce the complexity of
`hugetlb_hstate_alloc_pages` and has no functional changes.

- hugetlb_hstate_alloc_pages_node_specific() iterates through each
  online node and performs allocation if necessary.
- hugetlb_hstate_alloc_pages_report() reports errors during allocation,
  and the value of h->max_huge_pages is updated accordingly.

Signed-off-by: Gang Li <gang.li@linux.dev>
---
 mm/hugetlb.c | 46 +++++++++++++++++++++++++++++-----------------
 1 file changed, 29 insertions(+), 17 deletions(-)

Comments

Muchun Song Jan. 10, 2024, 10:19 a.m. UTC | #1
On 2024/1/2 21:12, Gang Li wrote:
> The readability of `hugetlb_hstate_alloc_pages` is poor. By cleaning the
> code, its readability can be improved, facilitating future modifications.
>
> This patch extracts two functions to reduce the complexity of
> `hugetlb_hstate_alloc_pages` and has no functional changes.
>
> - hugetlb_hstate_alloc_pages_node_specific() to handle iterates through
>    each online node and performs allocation if necessary.
> - hugetlb_hstate_alloc_pages_report() report error during allocation.
>    And the value of h->max_huge_pages is updated accordingly.
>
> Signed-off-by: Gang Li <gang.li@linux.dev>
> ---
>   mm/hugetlb.c | 46 +++++++++++++++++++++++++++++-----------------
>   1 file changed, 29 insertions(+), 17 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index ed1581b670d42..2606135ec55e6 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -3482,6 +3482,33 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
>   	h->max_huge_pages_node[nid] = i;
>   }
>   
> +static bool __init hugetlb_hstate_alloc_pages_node_specific(struct hstate *h)

I'd like to rename this to hugetlb_hstate_alloc_pages_specific_nodes.

Otherwise, LGTM.

Reviewed-by: Muchun Song <muchun.song@linux.dev>

> +{
> +	int i;
> +	bool node_specific_alloc = false;
> +
> +	for_each_online_node(i) {
> +		if (h->max_huge_pages_node[i] > 0) {
> +			hugetlb_hstate_alloc_pages_onenode(h, i);
> +			node_specific_alloc = true;
> +		}
> +	}
> +
> +	return node_specific_alloc;
> +}
> +
> +static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
> +{
> +	if (allocated < h->max_huge_pages) {
> +		char buf[32];
> +
> +		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
> +		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
> +			h->max_huge_pages, buf, allocated);
> +		h->max_huge_pages = allocated;
> +	}
> +}
> +
>   /*
>    * NOTE: this routine is called in different contexts for gigantic and
>    * non-gigantic pages.
> @@ -3499,7 +3526,6 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>   	struct folio *folio;
>   	LIST_HEAD(folio_list);
>   	nodemask_t *node_alloc_noretry;
> -	bool node_specific_alloc = false;
>   
>   	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
>   	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
> @@ -3508,14 +3534,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>   	}
>   
>   	/* do node specific alloc */
> -	for_each_online_node(i) {
> -		if (h->max_huge_pages_node[i] > 0) {
> -			hugetlb_hstate_alloc_pages_onenode(h, i);
> -			node_specific_alloc = true;
> -		}
> -	}
> -
> -	if (node_specific_alloc)
> +	if (hugetlb_hstate_alloc_pages_node_specific(h))
>   		return;
>   
>   	/* below will do all node balanced alloc */
> @@ -3558,14 +3577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>   	/* list will be empty if hstate_is_gigantic */
>   	prep_and_add_allocated_folios(h, &folio_list);
>   
> -	if (i < h->max_huge_pages) {
> -		char buf[32];
> -
> -		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
> -		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
> -			h->max_huge_pages, buf, i);
> -		h->max_huge_pages = i;
> -	}
> +	hugetlb_hstate_alloc_pages_report(i, h);
>   	kfree(node_alloc_noretry);
>   }
>
Tim Chen Jan. 10, 2024, 9:55 p.m. UTC | #2
On Tue, 2024-01-02 at 21:12 +0800, Gang Li wrote:
> The readability of `hugetlb_hstate_alloc_pages` is poor. By cleaning the
> code, its readability can be improved, facilitating future modifications.
> 
> This patch extracts two functions to reduce the complexity of
> `hugetlb_hstate_alloc_pages` and has no functional changes.
> 
> - hugetlb_hstate_alloc_pages_node_specific() to handle iterates through
>   each online node and performs allocation if necessary.
> - hugetlb_hstate_alloc_pages_report() report error during allocation.
>   And the value of h->max_huge_pages is updated accordingly.

Minor nit, I think hugetlb_hstate_alloc_pages_errcheck() is more
descriptive than hugetlb_hstate_alloc_pages_report().

Otherwise

Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>

> 
> Signed-off-by: Gang Li <gang.li@linux.dev>
> ---
>  mm/hugetlb.c | 46 +++++++++++++++++++++++++++++-----------------
>  1 file changed, 29 insertions(+), 17 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index ed1581b670d42..2606135ec55e6 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -3482,6 +3482,33 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
>  	h->max_huge_pages_node[nid] = i;
>  }
>  
> +static bool __init hugetlb_hstate_alloc_pages_node_specific(struct hstate *h)
> +{
> +	int i;
> +	bool node_specific_alloc = false;
> +
> +	for_each_online_node(i) {
> +		if (h->max_huge_pages_node[i] > 0) {
> +			hugetlb_hstate_alloc_pages_onenode(h, i);
> +			node_specific_alloc = true;
> +		}
> +	}
> +
> +	return node_specific_alloc;
> +}
> +
> +static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
> +{
> +	if (allocated < h->max_huge_pages) {
> +		char buf[32];
> +
> +		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
> +		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
> +			h->max_huge_pages, buf, allocated);
> +		h->max_huge_pages = allocated;
> +	}
> +}
> +
>  /*
>   * NOTE: this routine is called in different contexts for gigantic and
>   * non-gigantic pages.
> @@ -3499,7 +3526,6 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>  	struct folio *folio;
>  	LIST_HEAD(folio_list);
>  	nodemask_t *node_alloc_noretry;
> -	bool node_specific_alloc = false;
>  
>  	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
>  	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
> @@ -3508,14 +3534,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>  	}
>  
>  	/* do node specific alloc */
> -	for_each_online_node(i) {
> -		if (h->max_huge_pages_node[i] > 0) {
> -			hugetlb_hstate_alloc_pages_onenode(h, i);
> -			node_specific_alloc = true;
> -		}
> -	}
> -
> -	if (node_specific_alloc)
> +	if (hugetlb_hstate_alloc_pages_node_specific(h))
>  		return;
>  
>  	/* below will do all node balanced alloc */
> @@ -3558,14 +3577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
>  	/* list will be empty if hstate_is_gigantic */
>  	prep_and_add_allocated_folios(h, &folio_list);
>  
> -	if (i < h->max_huge_pages) {
> -		char buf[32];
> -
> -		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
> -		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
> -			h->max_huge_pages, buf, i);
> -		h->max_huge_pages = i;
> -	}
> +	hugetlb_hstate_alloc_pages_report(i, h);
>  	kfree(node_alloc_noretry);
>  }
>
Gang Li Jan. 11, 2024, 3:30 a.m. UTC | #3
On 2024/1/10 18:19, Muchun Song wrote:
> 
> 
> On 2024/1/2 21:12, Gang Li wrote:
>> The readability of `hugetlb_hstate_alloc_pages` is poor. By cleaning the
>> code, its readability can be improved, facilitating future modifications.
>>
>> This patch extracts two functions to reduce the complexity of
>> `hugetlb_hstate_alloc_pages` and has no functional changes.
>>
>> - hugetlb_hstate_alloc_pages_node_specific() to handle iterates through
>>    each online node and performs allocation if necessary.
>> - hugetlb_hstate_alloc_pages_report() report error during allocation.
>>    And the value of h->max_huge_pages is updated accordingly.
>>
>> Signed-off-by: Gang Li <gang.li@linux.dev>
>> ---
>>   mm/hugetlb.c | 46 +++++++++++++++++++++++++++++-----------------
>>   1 file changed, 29 insertions(+), 17 deletions(-)
>>
>> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
>> index ed1581b670d42..2606135ec55e6 100644
>> --- a/mm/hugetlb.c
>> +++ b/mm/hugetlb.c
>> @@ -3482,6 +3482,33 @@ static void __init 
>> hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
>>       h->max_huge_pages_node[nid] = i;
>>   }
>> +static bool __init hugetlb_hstate_alloc_pages_node_specific(struct 
>> hstate *h)
> 
> I'd like to rename this to hugetlb_hstate_alloc_pages_specific_nodes.
> 
> Otherwise, LGTM.
> 
> Reviewed-by: Muchun Song <muchun.song@linux.dev>
> 

Thanks! I will adjust it in the next version.
Gang Li Jan. 11, 2024, 3:34 a.m. UTC | #4
On 2024/1/11 05:55, Tim Chen wrote:
> On Tue, 2024-01-02 at 21:12 +0800, Gang Li wrote:
>> The readability of `hugetlb_hstate_alloc_pages` is poor. By cleaning the
>> code, its readability can be improved, facilitating future modifications.
>>
>> This patch extracts two functions to reduce the complexity of
>> `hugetlb_hstate_alloc_pages` and has no functional changes.
>>
>> - hugetlb_hstate_alloc_pages_node_specific() to handle iterates through
>>    each online node and performs allocation if necessary.
>> - hugetlb_hstate_alloc_pages_report() report error during allocation.
>>    And the value of h->max_huge_pages is updated accordingly.
> 
> Minor nit, I think hugetlb_hstate_alloc_pages_errcheck() is more
> descriptive than hugetlb_hstate_alloc_pages_report().

Thanks! This looks more intuitive.

> 
> Otherwise
> 
> Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
>
diff mbox series

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ed1581b670d42..2606135ec55e6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3482,6 +3482,33 @@  static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 	h->max_huge_pages_node[nid] = i;
 }
 
+static bool __init hugetlb_hstate_alloc_pages_node_specific(struct hstate *h)
+{
+	int i;
+	bool node_specific_alloc = false;
+
+	for_each_online_node(i) {
+		if (h->max_huge_pages_node[i] > 0) {
+			hugetlb_hstate_alloc_pages_onenode(h, i);
+			node_specific_alloc = true;
+		}
+	}
+
+	return node_specific_alloc;
+}
+
+static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
+{
+	if (allocated < h->max_huge_pages) {
+		char buf[32];
+
+		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
+		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
+			h->max_huge_pages, buf, allocated);
+		h->max_huge_pages = allocated;
+	}
+}
+
 /*
  * NOTE: this routine is called in different contexts for gigantic and
  * non-gigantic pages.
@@ -3499,7 +3526,6 @@  static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 	struct folio *folio;
 	LIST_HEAD(folio_list);
 	nodemask_t *node_alloc_noretry;
-	bool node_specific_alloc = false;
 
 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
@@ -3508,14 +3534,7 @@  static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 	}
 
 	/* do node specific alloc */
-	for_each_online_node(i) {
-		if (h->max_huge_pages_node[i] > 0) {
-			hugetlb_hstate_alloc_pages_onenode(h, i);
-			node_specific_alloc = true;
-		}
-	}
-
-	if (node_specific_alloc)
+	if (hugetlb_hstate_alloc_pages_node_specific(h))
 		return;
 
 	/* below will do all node balanced alloc */
@@ -3558,14 +3577,7 @@  static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 	/* list will be empty if hstate_is_gigantic */
 	prep_and_add_allocated_folios(h, &folio_list);
 
-	if (i < h->max_huge_pages) {
-		char buf[32];
-
-		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
-		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
-			h->max_huge_pages, buf, i);
-		h->max_huge_pages = i;
-	}
+	hugetlb_hstate_alloc_pages_report(i, h);
 	kfree(node_alloc_noretry);
 }