
[08/12] hugetlb: batch freeing of vmemmap pages

Message ID 20230825190436.55045-9-mike.kravetz@oracle.com (mailing list archive)
State: New
Series: Batch hugetlb vmemmap modification operations

Commit Message

Mike Kravetz Aug. 25, 2023, 7:04 p.m. UTC
Now that batching of hugetlb vmemmap optimization processing is possible,
batch the freeing of vmemmap pages.  When freeing vmemmap pages for a
hugetlb page, we add them to a list that is freed after the entire batch
has been processed.

This enhances the ability to return contiguous ranges of memory to the
low level allocators.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/hugetlb_vmemmap.c | 56 ++++++++++++++++++++++++++++++++------------
 1 file changed, 41 insertions(+), 15 deletions(-)
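
The shape of the change, condensed: vmemmap_remap_free() gains an optional
page list; when the caller supplies one, the freed vmemmap pages are spliced
onto it instead of being released immediately, and
hugetlb_vmemmap_optimize_folios() performs a single deferred free for the
whole batch. A minimal sketch of that flow, using the names from the diff
below:

/* Condensed sketch of the batching flow introduced by this patch. */
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
	struct folio *folio;
	LIST_HEAD(vmemmap_pages);	/* collects the pages freed for every folio */

	list_for_each_entry(folio, folio_list, lru)
		hugetlb_vmemmap_optimize_bulk(h, &folio->page, &vmemmap_pages);

	/*
	 * One deferred free for the whole batch, so contiguous ranges can
	 * be returned to the low level allocators together.
	 */
	free_vmemmap_page_list(&vmemmap_pages);
}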

Comments

kernel test robot Aug. 26, 2023, 4 a.m. UTC | #1
Hi Mike,

kernel test robot noticed the following build warnings:

[auto build test WARNING on next-20230825]
[cannot apply to akpm-mm/mm-everything v6.5-rc7 v6.5-rc6 v6.5-rc5 linus/master v6.5-rc7]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Mike-Kravetz/hugetlb-clear-flags-in-tail-pages-that-will-be-freed-individually/20230826-030805
base:   next-20230825
patch link:    https://lore.kernel.org/r/20230825190436.55045-9-mike.kravetz%40oracle.com
patch subject: [PATCH 08/12] hugetlb: batch freeing of vmemmap pages
config: s390-randconfig-001-20230826 (https://download.01.org/0day-ci/archive/20230826/202308261129.hdlNvI3X-lkp@intel.com/config)
compiler: clang version 17.0.0 (https://github.com/llvm/llvm-project.git 4a5ac14ee968ff0ad5d2cc1ffa0299048db4c88a)
reproduce: (https://download.01.org/0day-ci/archive/20230826/202308261129.hdlNvI3X-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202308261129.hdlNvI3X-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> mm/hugetlb_vmemmap.c:601:6: warning: no previous prototype for function 'hugetlb_vmemmap_optimize_bulk' [-Wmissing-prototypes]
     601 | void hugetlb_vmemmap_optimize_bulk(const struct hstate *h, struct page *head,
         |      ^
   mm/hugetlb_vmemmap.c:601:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
     601 | void hugetlb_vmemmap_optimize_bulk(const struct hstate *h, struct page *head,
         | ^
         | static 
   1 warning generated.


vim +/hugetlb_vmemmap_optimize_bulk +601 mm/hugetlb_vmemmap.c

   600	
 > 601	void hugetlb_vmemmap_optimize_bulk(const struct hstate *h, struct page *head,
   602				struct list_head *bulk_pages)
   603	{
   604		__hugetlb_vmemmap_optimize(h, head, bulk_pages);
   605	}
   606
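
The warning is clang's -Wmissing-prototypes: a non-static function defined
without a prior declaration. If hugetlb_vmemmap_optimize_bulk() is intended
to be called from other files (for example by later patches in this series),
the usual fix is a prototype next to the other hugetlb_vmemmap entry points;
the header placement below is an assumption, sketched for illustration.
Otherwise, the compiler's suggestion to mark the function static applies.
Note that Muchun's review below, which drops the wrapper entirely, would
make the warning moot.

/* Hypothetical hunk for mm/hugetlb_vmemmap.h: a prior prototype
 * silences -Wmissing-prototypes for the new entry point. */
void hugetlb_vmemmap_optimize_bulk(const struct hstate *h, struct page *head,
				   struct list_head *bulk_pages);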
Muchun Song Aug. 30, 2023, 7:20 a.m. UTC | #2
On 2023/8/26 03:04, Mike Kravetz wrote:
> Now that batching of hugetlb vmemmap optimization processing is possible,
> batch the freeing of vmemmap pages.  When freeing vmemmap pages for a
> hugetlb page, we add them to a list that is freed after the entire batch
> has been processed.
>
> This enhances the ability to return contiguous ranges of memory to the
> low level allocators.
>
> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
> ---
>   mm/hugetlb_vmemmap.c | 56 ++++++++++++++++++++++++++++++++------------
>   1 file changed, 41 insertions(+), 15 deletions(-)
>
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index d5e6b6c76dce..e390170c0887 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -305,11 +305,14 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
>    * @end:	end address of the vmemmap virtual address range that we want to
>    *		remap.
>    * @reuse:	reuse address.
> + * @bulk_pages: list to deposit vmemmap pages to be freed in bulk operations
> + *		or NULL in the non-bulk case.

I'd like to rename bulk_pages to vmemmap_pages. Always add the vmemmap
pages to this list and let the callers (hugetlb_vmemmap_optimize and
hugetlb_vmemmap_optimize_folios) free them. That would be clearer to me.
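
A sketch of that shape (hypothetical; the posted patch instead treats a
NULL list as the free-immediately case, and the local name freed_pages is
illustrative):

static int vmemmap_remap_free(unsigned long start, unsigned long end,
			      unsigned long reuse,
			      struct list_head *vmemmap_pages)
{
	int ret = 0;
	LIST_HEAD(freed_pages);

	/* ... remap the range as before, collecting pages on freed_pages
	 * and setting ret on failure ... */

	/* Always hand the pages back; the caller decides when to free. */
	list_splice(&freed_pages, vmemmap_pages);
	return ret;
}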

>    *
>    * Return: %0 on success, negative error code otherwise.
>    */
>   static int vmemmap_remap_free(unsigned long start, unsigned long end,
> -			      unsigned long reuse)
> +			      unsigned long reuse,
> +			      struct list_head *bulk_pages)
>   {
>   	int ret;
>   	LIST_HEAD(vmemmap_pages);
> @@ -372,7 +375,14 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
>   	}
>   	mmap_read_unlock(&init_mm);
>   
> -	free_vmemmap_page_list(&vmemmap_pages);
> +	/*
> +	 * If performing a bulk operation, do not free pages here;
> +	 * rather, add them to the bulk list.
> +	 */
> +	if (!bulk_pages)
> +		free_vmemmap_page_list(&vmemmap_pages);
> +	else
> +		list_splice(&vmemmap_pages, bulk_pages);

Here, always add vmemmap_pages to the list.

>   
>   	return ret;
>   }
> @@ -546,17 +556,9 @@ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *h
>   	return true;
>   }
>   
> -/**
> - * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
> - * @h:		struct hstate.
> - * @head:	the head page whose vmemmap pages will be optimized.
> - *
> - * This function only tries to optimize @head's vmemmap pages and does not
> - * guarantee that the optimization will succeed after it returns. The caller
> - * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
> - * have been optimized.
> - */
> -void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
> +static void __hugetlb_vmemmap_optimize(const struct hstate *h,
> +					struct page *head,
> +					struct list_head *bulk_pages)

Also rename this parameter to struct list_head *vmemmap_pages.

>   {
>   	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
>   	unsigned long vmemmap_reuse;
> @@ -575,18 +577,42 @@ void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
>   	 * to the page which @vmemmap_reuse is mapped to, then free the pages
>   	 * which the range [@vmemmap_start, @vmemmap_end] is mapped to.
>   	 */
> -	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
> +	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse, bulk_pages))
>   		static_branch_dec(&hugetlb_optimize_vmemmap_key);
>   	else
>   		SetHPageVmemmapOptimized(head);
>   }
>   
> +/**
> + * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
> + * @h:		struct hstate.
> + * @head:	the head page whose vmemmap pages will be optimized.
> + *
> + * This function only tries to optimize @head's vmemmap pages and does not
> + * guarantee that the optimization will succeed after it returns. The caller
> + * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
> + * have been optimized.
> + */
> +void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
> +{
> +	__hugetlb_vmemmap_optimize(h, head, NULL);

Use free_vmemmap_page_list to free the vmemmap pages here (see the
consolidated sketch below).

> +}
> +
> +void hugetlb_vmemmap_optimize_bulk(const struct hstate *h, struct page *head,
> +			struct list_head *bulk_pages)
> +{
> +	__hugetlb_vmemmap_optimize(h, head, bulk_pages);
> +}
> +
>   void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
>   {
>   	struct folio *folio;
> +	LIST_HEAD(vmemmap_pages);
>   
>   	list_for_each_entry(folio, folio_list, lru)
> -		hugetlb_vmemmap_optimize(h, &folio->page);
> +		hugetlb_vmemmap_optimize_bulk(h, &folio->page, &vmemmap_pages);

Directly use __hugetlb_vmemmap_optimize and delete
hugetlb_vmemmap_optimize_bulk.
In the future, we could rename hugetlb_vmemmap_optimize to
hugetlb_vmemmap_optimize_folio; then both function names would be
consistent (see the sketch below). E.g.

   1) hugetlb_vmemmap_optimize_folio(): used to free one folio's
      vmemmap pages.
   2) hugetlb_vmemmap_optimize_folios(): used to free multiple folios'
      vmemmap pages.

Thanks.
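
Put together, the consolidated interface described above might look like
this (a hypothetical sketch of the suggestion, not posted code):

void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
	LIST_HEAD(vmemmap_pages);

	__hugetlb_vmemmap_optimize(h, &folio->page, &vmemmap_pages);
	free_vmemmap_page_list(&vmemmap_pages);	/* free one folio's pages now */
}

void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
	struct folio *folio;
	LIST_HEAD(vmemmap_pages);

	list_for_each_entry(folio, folio_list, lru)
		__hugetlb_vmemmap_optimize(h, &folio->page, &vmemmap_pages);

	free_vmemmap_page_list(&vmemmap_pages);	/* one batched free */
}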

> +
> +	free_vmemmap_page_list(&vmemmap_pages);
>   }
>   
>   static struct ctl_table hugetlb_vmemmap_sysctls[] = {
Mike Kravetz Aug. 30, 2023, 6:36 p.m. UTC | #3
On 08/30/23 15:20, Muchun Song wrote:
> 
> 
> On 2023/8/26 03:04, Mike Kravetz wrote:
> > Now that batching of hugetlb vmemmap optimization processing is possible,
> > batch the freeing of vmemmap pages.  When freeing vmemmap pages for a
> > hugetlb page, we add them to a list that is freed after the entire batch
> > has been processed.
> > 
> > This enhances the ability to return contiguous ranges of memory to the
> > low level allocators.
> > 
> > Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
> > ---
> >   mm/hugetlb_vmemmap.c | 56 ++++++++++++++++++++++++++++++++------------
> >   1 file changed, 41 insertions(+), 15 deletions(-)
> > 
> > diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> > index d5e6b6c76dce..e390170c0887 100644
> > --- a/mm/hugetlb_vmemmap.c
> > +++ b/mm/hugetlb_vmemmap.c
> > @@ -305,11 +305,14 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
> >    * @end:	end address of the vmemmap virtual address range that we want to
> >    *		remap.
> >    * @reuse:	reuse address.
> > + * @bulk_pages: list to deposit vmemmap pages to be freed in bulk operations
> > + *		or NULL in the non-bulk case.
> 
> I'd like to rename bulk_pages to vmemmap_pages. Always add the vmemmap
> pages to this list and let the callers (hugetlb_vmemmap_optimize and
> hugetlb_vmemmap_optimize_folios) free them. That would be clearer to me.

Makes sense.

I will update all of these in the next version based on your suggestions.

Thank you,

Patch

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index d5e6b6c76dce..e390170c0887 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -305,11 +305,14 @@  static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
  * @end:	end address of the vmemmap virtual address range that we want to
  *		remap.
  * @reuse:	reuse address.
+ * @bulk_pages: list to deposit vmemmap pages to be freed in bulk operations
+ *		or NULL in the non-bulk case.
  *
  * Return: %0 on success, negative error code otherwise.
  */
 static int vmemmap_remap_free(unsigned long start, unsigned long end,
-			      unsigned long reuse)
+			      unsigned long reuse,
+			      struct list_head *bulk_pages)
 {
 	int ret;
 	LIST_HEAD(vmemmap_pages);
@@ -372,7 +375,14 @@  static int vmemmap_remap_free(unsigned long start, unsigned long end,
 	}
 	mmap_read_unlock(&init_mm);
 
-	free_vmemmap_page_list(&vmemmap_pages);
+	/*
+	 * If performing a bulk operation, do not free pages here;
+	 * rather, add them to the bulk list.
+	 */
+	if (!bulk_pages)
+		free_vmemmap_page_list(&vmemmap_pages);
+	else
+		list_splice(&vmemmap_pages, bulk_pages);
 
 	return ret;
 }
@@ -546,17 +556,9 @@  static bool vmemmap_should_optimize(const struct hstate *h, const struct page *h
 	return true;
 }
 
-/**
- * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
- * @h:		struct hstate.
- * @head:	the head page whose vmemmap pages will be optimized.
- *
- * This function only tries to optimize @head's vmemmap pages and does not
- * guarantee that the optimization will succeed after it returns. The caller
- * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
- * have been optimized.
- */
-void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
+static void __hugetlb_vmemmap_optimize(const struct hstate *h,
+					struct page *head,
+					struct list_head *bulk_pages)
 {
 	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
 	unsigned long vmemmap_reuse;
@@ -575,18 +577,42 @@  void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
 	 * to the page which @vmemmap_reuse is mapped to, then free the pages
 	 * which the range [@vmemmap_start, @vmemmap_end] is mapped to.
 	 */
-	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
+	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse, bulk_pages))
 		static_branch_dec(&hugetlb_optimize_vmemmap_key);
 	else
 		SetHPageVmemmapOptimized(head);
 }
 
+/**
+ * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
+ * @h:		struct hstate.
+ * @head:	the head page whose vmemmap pages will be optimized.
+ *
+ * This function only tries to optimize @head's vmemmap pages and does not
+ * guarantee that the optimization will succeed after it returns. The caller
+ * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
+ * have been optimized.
+ */
+void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
+{
+	__hugetlb_vmemmap_optimize(h, head, NULL);
+}
+
+void hugetlb_vmemmap_optimize_bulk(const struct hstate *h, struct page *head,
+			struct list_head *bulk_pages)
+{
+	__hugetlb_vmemmap_optimize(h, head, bulk_pages);
+}
+
 void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
 {
 	struct folio *folio;
+	LIST_HEAD(vmemmap_pages);
 
 	list_for_each_entry(folio, folio_list, lru)
-		hugetlb_vmemmap_optimize(h, &folio->page);
+		hugetlb_vmemmap_optimize_bulk(h, &folio->page, &vmemmap_pages);
+
+	free_vmemmap_page_list(&vmemmap_pages);
 }
 
 static struct ctl_table hugetlb_vmemmap_sysctls[] = {