diff mbox series

mm/hugetlb: convert dissolve_free_huge_pages() to folios

Message ID 20240411164756.261178-1-sidhartha.kumar@oracle.com (mailing list archive)
State New
Headers show
Series mm/hugetlb: convert dissolve_free_huge_pages() to folios | expand

Commit Message

Sidhartha Kumar April 11, 2024, 4:47 p.m. UTC
Allows us to rename dissolve_free_huge_page() to
dissolve_free_hugetlb_folio(). Convert one caller to pass in a folio
directly and use page_folio() to convert the caller in mm/memory-failure.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
 include/linux/hugetlb.h |  4 ++--
 mm/hugetlb.c            | 15 +++++++--------
 mm/memory-failure.c     |  4 ++--
 3 files changed, 11 insertions(+), 12 deletions(-)

Comments

Oscar Salvador April 12, 2024, 4:46 a.m. UTC | #1
On Thu, Apr 11, 2024 at 09:47:56AM -0700, Sidhartha Kumar wrote:
> Allows us to rename dissolve_free_huge_pages() to
> dissolve_free_hugetlb_folio(). Convert one caller to pass in a folio
> directly and use page_folio() to convert the caller in mm/memory-failure.
> 
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>

Reviewed-by: Oscar Salvador <osalvador@suse.de>
Miaohe Lin April 12, 2024, 8:08 a.m. UTC | #2
On 2024/4/12 0:47, Sidhartha Kumar wrote:
> Allows us to rename dissolve_free_huge_pages() to
> dissolve_free_hugetlb_folio(). Convert one caller to pass in a folio
> directly and use page_folio() to convert the caller in mm/memory-failure.
> 
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>

Thanks for your patch. Some nits below.

> ---
>  include/linux/hugetlb.h |  4 ++--
>  mm/hugetlb.c            | 15 +++++++--------
>  mm/memory-failure.c     |  4 ++--
>  3 files changed, 11 insertions(+), 12 deletions(-)
> 
>  
>  /*
> - * Dissolve a given free hugepage into free buddy pages. This function does
> - * nothing for in-use hugepages and non-hugepages.
> + * Dissolve a given free hugetlb folio into free buddy pages. This function
> + * does nothing for in-use hugepages and non-hugepages.

in-use hugetlb folio and non-hugetlb folio?

>   * This function returns values like below:
>   *
>   *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
> @@ -2390,10 +2390,9 @@ static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
>   *       0:  successfully dissolved free hugepages or the page is not a
>   *           hugepage (considered as already dissolved)
>   */
> -int dissolve_free_huge_page(struct page *page)
> +int dissolve_free_hugetlb_folio(struct folio *folio)
>  {
>  	int rc = -EBUSY;
> -	struct folio *folio = page_folio(page);
>  
>  retry:
>  	/* Not to disrupt normal path by vainly holding hugetlb_lock */
> @@ -2470,13 +2469,13 @@ int dissolve_free_huge_page(struct page *page)
>   * make specified memory blocks removable from the system.
>   * Note that this will dissolve a free gigantic hugepage completely, if any
>   * part of it lies within the given range.
> - * Also note that if dissolve_free_huge_page() returns with an error, all
> + * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
>   * free hugepages that were dissolved before that error are lost.

free hugetlb folio?

>   */
>  int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
>  {
>  	unsigned long pfn;
> -	struct page *page;
> +	struct folio *folio;
>  	int rc = 0;
>  	unsigned int order;
>  	struct hstate *h;
> @@ -2489,8 +2488,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
>  		order = min(order, huge_page_order(h));
>  
>  	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
> -		page = pfn_to_page(pfn);
> -		rc = dissolve_free_huge_page(page);
> +		folio = pfn_folio(pfn);
> +		rc = dissolve_free_hugetlb_folio(folio);
>  		if (rc)
>  			break;
>  	}
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 88359a185c5f9..5a6062b61c44d 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -155,11 +155,11 @@ static int __page_handle_poison(struct page *page)
>  
>  	/*
>  	 * zone_pcp_disable() can't be used here. It will hold pcp_batch_high_lock and
> -	 * dissolve_free_huge_page() might hold cpu_hotplug_lock via static_key_slow_dec()
> +	 * dissolve_free_hugetlb_folio() might hold cpu_hotplug_lock via static_key_slow_dec()
>  	 * when hugetlb vmemmap optimization is enabled. This will break current lock
>  	 * dependency chain and leads to deadlock.
>  	 */
> -	ret = dissolve_free_huge_page(page);
> +	ret = dissolve_free_hugetlb_folio(page_folio(page));
>  	if (!ret) {
>  		drain_all_pages(page_zone(page));
>  		ret = take_page_off_buddy(page);

There is a comment in page_handle_poison referring to dissolve_free_huge_page. It might be better to change it too?

static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since *dissolve_free_huge_page*
		 * returns 0 for non-hugetlb pages as well.
		 */
		if (__page_handle_poison(page) <= 0)
			/*
			 * We could fail to take off the target page from buddy
			 * for example due to racy page allocation, but that's
			 * acceptable because soft-offlined page is not broken
			 * and if someone really want to use it, they should
			 * take it.
			 */
			return false;
	}

Thanks.
.

>
diff mbox series

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3f3e628802792..f4191b10345d6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -861,7 +861,7 @@  static inline int hstate_index(struct hstate *h)
 	return h - hstates;
 }
 
-extern int dissolve_free_huge_page(struct page *page);
+extern int dissolve_free_hugetlb_folio(struct folio *folio);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
 
@@ -1148,7 +1148,7 @@  static inline int hstate_index(struct hstate *h)
 	return 0;
 }
 
-static inline int dissolve_free_huge_page(struct page *page)
+static inline int dissolve_free_hugetlb_folio(struct folio *folio)
 {
 	return 0;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 454900c84b303..617f8bec6eb42 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2377,8 +2377,8 @@  static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
 }
 
 /*
- * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use hugepages and non-hugepages.
+ * Dissolve a given free hugetlb folio into free buddy pages. This function
+ * does nothing for in-use hugepages and non-hugepages.
  * This function returns values like below:
  *
  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
@@ -2390,10 +2390,9 @@  static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
  *       0:  successfully dissolved free hugepages or the page is not a
  *           hugepage (considered as already dissolved)
  */
-int dissolve_free_huge_page(struct page *page)
+int dissolve_free_hugetlb_folio(struct folio *folio)
 {
 	int rc = -EBUSY;
-	struct folio *folio = page_folio(page);
 
 retry:
 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
@@ -2470,13 +2469,13 @@  int dissolve_free_huge_page(struct page *page)
  * make specified memory blocks removable from the system.
  * Note that this will dissolve a free gigantic hugepage completely, if any
  * part of it lies within the given range.
- * Also note that if dissolve_free_huge_page() returns with an error, all
+ * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
  * free hugepages that were dissolved before that error are lost.
  */
 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
-	struct page *page;
+	struct folio *folio;
 	int rc = 0;
 	unsigned int order;
 	struct hstate *h;
@@ -2489,8 +2488,8 @@  int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 		order = min(order, huge_page_order(h));
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
-		page = pfn_to_page(pfn);
-		rc = dissolve_free_huge_page(page);
+		folio = pfn_folio(pfn);
+		rc = dissolve_free_hugetlb_folio(folio);
 		if (rc)
 			break;
 	}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 88359a185c5f9..5a6062b61c44d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -155,11 +155,11 @@  static int __page_handle_poison(struct page *page)
 
 	/*
 	 * zone_pcp_disable() can't be used here. It will hold pcp_batch_high_lock and
-	 * dissolve_free_huge_page() might hold cpu_hotplug_lock via static_key_slow_dec()
+	 * dissolve_free_hugetlb_folio() might hold cpu_hotplug_lock via static_key_slow_dec()
 	 * when hugetlb vmemmap optimization is enabled. This will break current lock
 	 * dependency chain and leads to deadlock.
 	 */
-	ret = dissolve_free_huge_page(page);
+	ret = dissolve_free_hugetlb_folio(page_folio(page));
 	if (!ret) {
 		drain_all_pages(page_zone(page));
 		ret = take_page_off_buddy(page);