diff mbox series

[3/3] hugetlb: Convert remove_pool_huge_page() to return a folio

Message ID 20230822162808.4131399-3-willy@infradead.org (mailing list archive)
State New
Headers show
Series [1/3] hugetlb: Use a folio in free_hpage_workfn() | expand

Commit Message

Matthew Wilcox Aug. 22, 2023, 4:28 p.m. UTC
Convert the callers to expect a folio and remove the unnecessary conversion
back to a struct page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
 mm/hugetlb.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

Comments

Mike Kravetz Aug. 23, 2023, 10:48 p.m. UTC | #1
On 08/22/23 17:28, Matthew Wilcox (Oracle) wrote:
> Convert the callers to expect a folio and remove the unnecessary conversion
> back to a struct page.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> ---
>  mm/hugetlb.c | 25 +++++++++++++------------
>  1 file changed, 13 insertions(+), 12 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index d6309edb59e5..283cd5290515 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -2221,9 +2221,8 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
>   * an additional call to free the page to low level allocators.
>   * Called with hugetlb_lock locked.
>   */
> -static struct page *remove_pool_huge_page(struct hstate *h,
> -						nodemask_t *nodes_allowed,
> -						 bool acct_surplus)
> +static struct folio *remove_pool_huge_page(struct hstate *h,
> +		nodemask_t *nodes_allowed, bool acct_surplus)

might have been good to make a simple name change to remove_pool_huge_folio,
but not insisting.

Code looks fine,                                                                
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Matthew Wilcox Aug. 24, 2023, 1:10 a.m. UTC | #2
On Wed, Aug 23, 2023 at 03:48:13PM -0700, Mike Kravetz wrote:
> > -static struct page *remove_pool_huge_page(struct hstate *h,
> > -						nodemask_t *nodes_allowed,
> > -						 bool acct_surplus)
> > +static struct folio *remove_pool_huge_page(struct hstate *h,
> > +		nodemask_t *nodes_allowed, bool acct_surplus)
> 
> might have been good to make a simple name change to remove_pool_huge_folio,
> but not insisting.

I wasn't sure what we were calling it now.  I can go for huge_folio.
Muchun Song Aug. 24, 2023, 2:38 a.m. UTC | #3
> On Aug 24, 2023, at 09:10, Matthew Wilcox <willy@infradead.org> wrote:
> 
> On Wed, Aug 23, 2023 at 03:48:13PM -0700, Mike Kravetz wrote:
>>> -static struct page *remove_pool_huge_page(struct hstate *h,
>>> - nodemask_t *nodes_allowed,
>>> - bool acct_surplus)
>>> +static struct folio *remove_pool_huge_page(struct hstate *h,
>>> + nodemask_t *nodes_allowed, bool acct_surplus)
>> 
>> might have been good to make a simple name change to remove_pool_huge_folio,
>> but not insisting.
> 
> I wasn't sure what we were calling it now.  I can go for huge_folio.
> 

However, there are so many places where it is hugetlb_folio, so I'd suggest
hugetlb_folio.

Thanks.
diff mbox series

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d6309edb59e5..283cd5290515 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2221,9 +2221,8 @@  static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
  * an additional call to free the page to low level allocators.
  * Called with hugetlb_lock locked.
  */
-static struct page *remove_pool_huge_page(struct hstate *h,
-						nodemask_t *nodes_allowed,
-						 bool acct_surplus)
+static struct folio *remove_pool_huge_page(struct hstate *h,
+		nodemask_t *nodes_allowed, bool acct_surplus)
 {
 	int nr_nodes, node;
 	struct folio *folio;
@@ -2243,7 +2242,7 @@  static struct page *remove_pool_huge_page(struct hstate *h,
 		}
 	}
 
-	return &folio->page;
+	return folio;
 }
 
 /*
@@ -2597,7 +2596,6 @@  static void return_unused_surplus_pages(struct hstate *h,
 					unsigned long unused_resv_pages)
 {
 	unsigned long nr_pages;
-	struct page *page;
 	LIST_HEAD(page_list);
 
 	lockdep_assert_held(&hugetlb_lock);
@@ -2622,11 +2620,13 @@  static void return_unused_surplus_pages(struct hstate *h,
 	 * on-line nodes with memory and will handle the hstate accounting.
 	 */
 	while (nr_pages--) {
-		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
-		if (!page)
+		struct folio *folio;
+
+		folio = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
+		if (!folio)
 			goto out;
 
-		list_add(&page->lru, &page_list);
+		list_add(&folio->lru, &page_list);
 	}
 
 out:
@@ -3421,7 +3421,6 @@  static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 			      nodemask_t *nodes_allowed)
 {
 	unsigned long min_count, ret;
-	struct page *page;
 	LIST_HEAD(page_list);
 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
 
@@ -3541,11 +3540,13 @@  static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	 * Collect pages to be removed on list without dropping lock
 	 */
 	while (min_count < persistent_huge_pages(h)) {
-		page = remove_pool_huge_page(h, nodes_allowed, 0);
-		if (!page)
+		struct folio *folio;
+
+		folio = remove_pool_huge_page(h, nodes_allowed, 0);
+		if (!folio)
 			break;
 
-		list_add(&page->lru, &page_list);
+		list_add(&folio->lru, &page_list);
 	}
 	/* free the pages after dropping lock */
 	spin_unlock_irq(&hugetlb_lock);