[v2,11/11] hugetlb: batch TLB flushes when restoring vmemmap

Message ID 20230905214412.89152-12-mike.kravetz@oracle.com (mailing list archive)
State New
Series Batch hugetlb vmemmap modification operations

Commit Message

Mike Kravetz Sept. 5, 2023, 9:44 p.m. UTC
Update the hugetlb_vmemmap_restore path to take a 'batch' parameter that
indicates restoration is happening on a batch of pages.  When set, use
the existing mechanism (VMEMMAP_NO_TLB_FLUSH) to delay TLB flushing.
The routine hugetlb_vmemmap_restore_folios is the only user of this new
batch parameter and it will perform a global flush after all vmemmap is
restored.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/hugetlb_vmemmap.c | 37 +++++++++++++++++++++++--------------
 1 file changed, 23 insertions(+), 14 deletions(-)
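
For readers skimming the series, the sketch below illustrates the mechanism the commit message refers to: the per-range remap walk only issues its own TLB flush when the caller has not set VMEMMAP_NO_TLB_FLUSH, so a batch caller can pay a single global flush at the end. The walk function shown here is a simplified stand-in for the series' range-walk helper, not the exact upstream code; VMEMMAP_NO_TLB_FLUSH and flush_tlb_kernel_range() are the names used by the series and the kernel.

/*
 * Simplified sketch of the deferred-flush mechanism (not the upstream code).
 * When the caller sets VMEMMAP_NO_TLB_FLUSH in walk->flags, the ranged flush
 * is skipped and the caller is responsible for flushing once afterwards.
 */
static void remap_range_sketch(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	/* ... walk the page tables, calling walk->remap_pte on each entry ... */

	if (!(walk->flags & VMEMMAP_NO_TLB_FLUSH))
		flush_tlb_kernel_range(start, end);
}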

Comments

Muchun Song Sept. 7, 2023, 6:58 a.m. UTC | #1
On 2023/9/6 05:44, Mike Kravetz wrote:
> Update the hugetlb_vmemmap_restore path to take a 'batch' parameter that

s/batch/flags/g

And the commit message should be reworked, since the parameter has been changed.

> indicates restoration is happening on a batch of pages.  When set, use
> the existing mechanism (VMEMMAP_NO_TLB_FLUSH) to delay TLB flushing.
> The routine hugetlb_vmemmap_restore_folios is the only user of this new
> batch parameter and it will perform a global flush after all vmemmap is
> restored.
>
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
> ---
>   mm/hugetlb_vmemmap.c | 37 +++++++++++++++++++++++--------------
>   1 file changed, 23 insertions(+), 14 deletions(-)
>
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index 8c85e2c38538..11fda9d061eb 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -458,17 +458,19 @@ static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
>    * @end:	end address of the vmemmap virtual address range that we want to
>    *		remap.
>    * @reuse:	reuse address.
> + * @flags:	modify behavior for bulk operations
>    *
>    * Return: %0 on success, negative error code otherwise.
>    */
>   static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
> -			       unsigned long reuse)
> +			       unsigned long reuse, unsigned long flags)
>   {
>   	LIST_HEAD(vmemmap_pages);
>   	struct vmemmap_remap_walk walk = {
>   		.remap_pte	= vmemmap_restore_pte,
>   		.reuse_addr	= reuse,
>   		.vmemmap_pages	= &vmemmap_pages,
> +		.flags		= flags,
>   	};
>   
>   	/* See the comment in the vmemmap_remap_free(). */
> @@ -490,17 +492,7 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
>   static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
>   core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
>   
> -/**
> - * hugetlb_vmemmap_restore - restore previously optimized (by
> - *			     hugetlb_vmemmap_optimize()) vmemmap pages which
> - *			     will be reallocated and remapped.
> - * @h:		struct hstate.
> - * @head:	the head page whose vmemmap pages will be restored.
> - *
> - * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
> - * negative error code otherwise.
> - */
> -int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
> +static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head, unsigned long flags)
>   {
>   	int ret;
>   	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
> @@ -521,7 +513,7 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
>   	 * When a HugeTLB page is freed to the buddy allocator, previously
>   	 * discarded vmemmap pages must be allocated and remapping.
>   	 */
> -	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse);
> +	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags);
>   	if (!ret) {
>   		ClearHPageVmemmapOptimized(head);
>   		static_branch_dec(&hugetlb_optimize_vmemmap_key);
> @@ -530,6 +522,21 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
>   	return ret;
>   }
>   
> +/**
> + * hugetlb_vmemmap_restore - restore previously optimized (by
> + *			     hugetlb_vmemmap_optimize()) vmemmap pages which
> + *			     will be reallocated and remapped.
> + * @h:		struct hstate.
> + * @head:	the head page whose vmemmap pages will be restored.
> + *
> + * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
> + * negative error code otherwise.
> + */
> +int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
> +{
> +	return __hugetlb_vmemmap_restore(h, head, 0UL);

UL suffix could be dropped.

Thanks.

> +}
> +
>   /*
>    * This function will attempt to resore vmemmap for a list of folios.  There
>    * is no guarantee that restoration will be successful for all or any folios.
> @@ -540,7 +547,9 @@ void hugetlb_vmemmap_restore_folios(const struct hstate *h, struct list_head *fo
>   	struct folio *folio;
>   
>   	list_for_each_entry(folio, folio_list, lru)
> -		(void)hugetlb_vmemmap_restore(h, &folio->page);
> +		(void)__hugetlb_vmemmap_restore(h, &folio->page, VMEMMAP_NO_TLB_FLUSH);
> +
> +	flush_tlb_all();
>   }
>   
>   /* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
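
A note on the 0UL remark above: the new flags parameter is declared unsigned long, so a plain integer literal 0 is implicitly converted to that type and the suffix carries no extra meaning; the call can simply read:

	return __hugetlb_vmemmap_restore(h, head, 0);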
Mike Kravetz Sept. 7, 2023, 6:58 p.m. UTC | #2
On 09/07/23 14:58, Muchun Song wrote:
> 
> 
> On 2023/9/6 05:44, Mike Kravetz wrote:
> > Update the hugetlb_vmemmap_restore path to take a 'batch' parameter that
> 
> s/batch/flags/g
> 
> And the commit message should be reworked, since the parameter has been changed.

Yes.

> 
> > [... quoted commit message and patch snipped; only the line commented on is kept ...]
> > +	return __hugetlb_vmemmap_restore(h, head, 0UL);
> 
> UL suffix could be dropped.

Thanks, will fix both in next version.

Patch

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 8c85e2c38538..11fda9d061eb 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -458,17 +458,19 @@  static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
  * @end:	end address of the vmemmap virtual address range that we want to
  *		remap.
  * @reuse:	reuse address.
+ * @flags:	modify behavior for bulk operations
  *
  * Return: %0 on success, negative error code otherwise.
  */
 static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
-			       unsigned long reuse)
+			       unsigned long reuse, unsigned long flags)
 {
 	LIST_HEAD(vmemmap_pages);
 	struct vmemmap_remap_walk walk = {
 		.remap_pte	= vmemmap_restore_pte,
 		.reuse_addr	= reuse,
 		.vmemmap_pages	= &vmemmap_pages,
+		.flags		= flags,
 	};
 
 	/* See the comment in the vmemmap_remap_free(). */
@@ -490,17 +492,7 @@  EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
 
-/**
- * hugetlb_vmemmap_restore - restore previously optimized (by
- *			     hugetlb_vmemmap_optimize()) vmemmap pages which
- *			     will be reallocated and remapped.
- * @h:		struct hstate.
- * @head:	the head page whose vmemmap pages will be restored.
- *
- * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
- * negative error code otherwise.
- */
-int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
+static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head, unsigned long flags)
 {
 	int ret;
 	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
@@ -521,7 +513,7 @@  int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
 	 * When a HugeTLB page is freed to the buddy allocator, previously
 	 * discarded vmemmap pages must be allocated and remapping.
 	 */
-	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse);
+	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags);
 	if (!ret) {
 		ClearHPageVmemmapOptimized(head);
 		static_branch_dec(&hugetlb_optimize_vmemmap_key);
@@ -530,6 +522,21 @@  int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
 	return ret;
 }
 
+/**
+ * hugetlb_vmemmap_restore - restore previously optimized (by
+ *			     hugetlb_vmemmap_optimize()) vmemmap pages which
+ *			     will be reallocated and remapped.
+ * @h:		struct hstate.
+ * @head:	the head page whose vmemmap pages will be restored.
+ *
+ * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
+ * negative error code otherwise.
+ */
+int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
+{
+	return __hugetlb_vmemmap_restore(h, head, 0UL);
+}
+
 /*
  * This function will attempt to resore vmemmap for a list of folios.  There
  * is no guarantee that restoration will be successful for all or any folios.
@@ -540,7 +547,9 @@  void hugetlb_vmemmap_restore_folios(const struct hstate *h, struct list_head *fo
 	struct folio *folio;
 
 	list_for_each_entry(folio, folio_list, lru)
-		(void)hugetlb_vmemmap_restore(h, &folio->page);
+		(void)__hugetlb_vmemmap_restore(h, &folio->page, VMEMMAP_NO_TLB_FLUSH);
+
+	flush_tlb_all();
 }
 
 /* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
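
As a usage illustration (not part of the patch), the hypothetical caller below shows the intended calling convention: gather folios on a list, restore all of their vmemmap in one pass so only a single flush_tlb_all() is issued, then check each folio before freeing it, since restoration is not guaranteed to succeed for every folio. Only hugetlb_vmemmap_restore_folios() and HPageVmemmapOptimized() come from the diff above; the function name and the freeing details are assumptions.

/* Hypothetical caller sketch, not part of this patch. */
static void free_hugetlb_folios_bulk(struct hstate *h, struct list_head *folio_list)
{
	struct folio *folio, *next;

	/*
	 * One remap walk per folio runs with VMEMMAP_NO_TLB_FLUSH; the helper
	 * issues a single flush_tlb_all() after the whole list is processed.
	 */
	hugetlb_vmemmap_restore_folios(h, folio_list);

	list_for_each_entry_safe(folio, next, folio_list, lru) {
		/* Restoration can fail per folio; skip folios still optimized. */
		if (HPageVmemmapOptimized(&folio->page))
			continue;

		list_del(&folio->lru);
		/* ... return the folio to the buddy allocator ... */
	}
}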