
[v10,04/11] mm/hugetlb: Defer freeing of HugeTLB pages

Message ID 20201217121303.13386-5-songmuchun@bytedance.com (mailing list archive)
State New, archived
Series Free some vmemmap pages of HugeTLB page

Commit Message

Muchun Song Dec. 17, 2020, 12:12 p.m. UTC
In the subsequent patch, we will allocate the vmemmap pages when freeing
HugeTLB pages. But update_and_free_page() is called from a non-task
context (and with hugetlb_lock held), so we defer the actual freeing to
a workqueue to avoid having to use GFP_ATOMIC to allocate the vmemmap
pages.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/hugetlb.c         | 80 ++++++++++++++++++++++++++++++++++++++++++++++++----
 mm/hugetlb_vmemmap.c | 12 --------
 mm/hugetlb_vmemmap.h | 17 +++++++++++
 3 files changed, 91 insertions(+), 18 deletions(-)
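
The least obvious part of the patch below is that page->mapping is reused as
storage for an llist_node and the enqueued page is later recovered with
container_of(). A minimal standalone sketch of that pointer arithmetic
(stand-in types only, not the kernel code):

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel structures involved; illustration only. */
struct llist_node { struct llist_node *next; };

struct fake_page {
	unsigned long flags;
	void *mapping;			/* reused as llist_node storage */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct fake_page page = { .flags = 0, .mapping = NULL };

	/* Enqueue: treat the address of ->mapping as an llist_node. */
	struct llist_node *node = (struct llist_node *)&page.mapping;

	/* Dequeue: recover the page from the node address. */
	struct fake_page *recovered =
		container_of((void **)node, struct fake_page, mapping);

	printf("recovered == &page? %s\n", recovered == &page ? "yes" : "no");
	return 0;
}

In the real patch the node comes back from llist_del_all() and the cast goes
through (struct address_space **), but the recovery is the same offsetof()
arithmetic.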

Comments

Oscar Salvador Dec. 21, 2020, 10:27 a.m. UTC | #1
On Thu, Dec 17, 2020 at 08:12:56PM +0800, Muchun Song wrote:
> In the subsequent patch, we will allocate the vmemmap pages when free
> HugeTLB pages. But update_and_free_page() is called from a non-task
> context(and hold hugetlb_lock), so we can defer the actual freeing in
> a workqueue to prevent from using GFP_ATOMIC to allocate the vmemmap
> pages.

I think we would benefit from a more complete changelog; at least I had
to stare at the code for a while in order to grasp what we are trying
to do and the reasons behind it.

> +static void __free_hugepage(struct hstate *h, struct page *page);
> +
> +/*
> + * As update_and_free_page() is be called from a non-task context(and hold
> + * hugetlb_lock), we can defer the actual freeing in a workqueue to prevent
> + * use GFP_ATOMIC to allocate a lot of vmemmap pages.

The above implies that update_and_free_page() is __always__ called from a 
non-task context, but that is not always the case?

> +static void update_hpage_vmemmap_workfn(struct work_struct *work)
>  {
> -	int i;
> +	struct llist_node *node;
> +	struct page *page;
>  
> +	node = llist_del_all(&hpage_update_freelist);
> +
> +	while (node) {
> +		page = container_of((struct address_space **)node,
> +				     struct page, mapping);
> +		node = node->next;
> +		page->mapping = NULL;
> +		__free_hugepage(page_hstate(page), page);
> +
> +		cond_resched();
> +	}
> +}
> +static DECLARE_WORK(hpage_update_work, update_hpage_vmemmap_workfn);

I wonder if this should be moved to hugetlb_vmemmap.c

> +/*
> + * This is where the call to allocate vmemmmap pages will be inserted.
> + */

I think this should go in the changelog.

> +static void __free_hugepage(struct hstate *h, struct page *page)
> +{
> +	int i;
> +
>  	for (i = 0; i < pages_per_huge_page(h); i++) {
>  		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
>  				1 << PG_referenced | 1 << PG_dirty |
> @@ -1313,13 +1377,17 @@ static void update_and_free_page(struct hstate *h, struct page *page)
>  	set_page_refcounted(page);
>  	if (hstate_is_gigantic(h)) {
>  		/*
> -		 * Temporarily drop the hugetlb_lock, because
> -		 * we might block in free_gigantic_page().
> +		 * Temporarily drop the hugetlb_lock only when this type of
> +		 * HugeTLB page does not support vmemmap optimization (which
> +		 * context do not hold the hugetlb_lock), because we might
> +		 * block in free_gigantic_page().

"
 /*
  * Temporarily drop the hugetlb_lock, because we might block
  * in free_gigantic_page(). Only drop it in case the vmemmap
  * optimization is disabled, since that context does not hold
  * the lock.
  */
" ?

 
Oscar Salvador
SUSE L3
Muchun Song Dec. 21, 2020, 11:07 a.m. UTC | #2
On Mon, Dec 21, 2020 at 6:27 PM Oscar Salvador <osalvador@suse.de> wrote:
>
> On Thu, Dec 17, 2020 at 08:12:56PM +0800, Muchun Song wrote:
> > In the subsequent patch, we will allocate the vmemmap pages when free
> > HugeTLB pages. But update_and_free_page() is called from a non-task
> > context(and hold hugetlb_lock), so we can defer the actual freeing in
> > a workqueue to prevent from using GFP_ATOMIC to allocate the vmemmap
> > pages.
>
> I think we would benefit from a more complete changelog, at least I had
> to stare at the code for a while in order to grasp what are we trying
> to do and the reasons behind.

OK. Will do.

>
> > +static void __free_hugepage(struct hstate *h, struct page *page);
> > +
> > +/*
> > + * As update_and_free_page() is be called from a non-task context(and hold
> > + * hugetlb_lock), we can defer the actual freeing in a workqueue to prevent
> > + * use GFP_ATOMIC to allocate a lot of vmemmap pages.
>
> The above implies that update_and_free_page() is __always__ called from a
> non-task context, but that is not always the case?

IIUC, that is always the case here.

>
> > +static void update_hpage_vmemmap_workfn(struct work_struct *work)
> >  {
> > -     int i;
> > +     struct llist_node *node;
> > +     struct page *page;
> >
> > +     node = llist_del_all(&hpage_update_freelist);
> > +
> > +     while (node) {
> > +             page = container_of((struct address_space **)node,
> > +                                  struct page, mapping);
> > +             node = node->next;
> > +             page->mapping = NULL;
> > +             __free_hugepage(page_hstate(page), page);
> > +
> > +             cond_resched();
> > +     }
> > +}
> > +static DECLARE_WORK(hpage_update_work, update_hpage_vmemmap_workfn);
>
> I wonder if this should be moved to hugetlb_vmemmap.c

Maybe I can give it a try.

>
> > +/*
> > + * This is where the call to allocate vmemmmap pages will be inserted.
> > + */
>
> I think this should go in the changelog.

OK. Will do.

>
> > +static void __free_hugepage(struct hstate *h, struct page *page)
> > +{
> > +     int i;
> > +
> >       for (i = 0; i < pages_per_huge_page(h); i++) {
> >               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
> >                               1 << PG_referenced | 1 << PG_dirty |
> > @@ -1313,13 +1377,17 @@ static void update_and_free_page(struct hstate *h, struct page *page)
> >       set_page_refcounted(page);
> >       if (hstate_is_gigantic(h)) {
> >               /*
> > -              * Temporarily drop the hugetlb_lock, because
> > -              * we might block in free_gigantic_page().
> > +              * Temporarily drop the hugetlb_lock only when this type of
> > +              * HugeTLB page does not support vmemmap optimization (which
> > +              * context do not hold the hugetlb_lock), because we might
> > +              * block in free_gigantic_page().
>
> "
>  /*
>   * Temporarily drop the hugetlb_lock, because we might block
>   * in free_gigantic_page(). Only drop it in case the vmemmap
>   * optimization is disabled, since that context does not hold
>   * the lock.
>   */
> " ?

Thanks a lot.

>
>
> Oscar Salvador
> SUSE L3
Oscar Salvador Dec. 21, 2020, 2:14 p.m. UTC | #3
On Mon, Dec 21, 2020 at 07:07:18PM +0800, Muchun Song wrote:
> > The above implies that update_and_free_page() is __always__ called from a
> > non-task context, but that is not always the case?
> 
> IIUC, here is always the case.

I might be missing something obvious, so bear with me.

I guess you are referring to the call __free_huge_page()->update_and_free_page().
AFAICS, free_huge_page() might call __free_huge_page() right away when in task
context, and so we would be calling update_and_free_page() in a task context as well.

Or are you referring to the other callers?
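
For reference, the dispatch being discussed looks roughly like this in the
mm/hugetlb.c of that time -- a simplified sketch from memory, not part of
this patch:

void free_huge_page(struct page *page)
{
	/*
	 * free_huge_page() may run from softirq context, e.g. via put_page()
	 * from an interrupt handler, where hugetlb_lock must not be taken.
	 */
	if (!in_task()) {
		/* Defer; the worker calls __free_huge_page() from task context. */
		if (llist_add((struct llist_node *)&page->mapping,
			      &hpage_freelist))
			schedule_work(&free_hpage_work);
		return;
	}

	__free_huge_page(page);	/* ends up in update_and_free_page() */
}

This is the call path Muchun refers to in the next message.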
Muchun Song Dec. 21, 2020, 3:18 p.m. UTC | #4
On Mon, Dec 21, 2020 at 10:14 PM Oscar Salvador <osalvador@suse.de> wrote:
>
> On Mon, Dec 21, 2020 at 07:07:18PM +0800, Muchun Song wrote:
> > > The above implies that update_and_free_page() is __always__ called from a
> > > non-task context, but that is not always the case?
> >
> > IIUC, here is always the case.
>
> I might be missing something obvious, so bear with me.
>
> I guess you are refering to the call __free_huge_page()->update_and_free_page().
> AFAICS, free_huge_page might call __free_huge_page right away when in task
> context, and so, we would be calling update_and_free in a task context as well.

Yeah. You are right. I mean the call path __free_huge_page()->update_and_free_page().
Because update_and_free_page() is called under hugetlb_lock, it is a
non-task context, right?


>
> Or are you referring to the other callers?
>
> --
> Oscar Salvador
> SUSE L3

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 140135fc8113..9f35f34d3195 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1292,15 +1292,79 @@  static inline void destroy_compound_gigantic_page(struct page *page,
 						unsigned int order) { }
 #endif
 
-static void update_and_free_page(struct hstate *h, struct page *page)
+static void __free_hugepage(struct hstate *h, struct page *page);
+
+/*
+ * As update_and_free_page() is be called from a non-task context(and hold
+ * hugetlb_lock), we can defer the actual freeing in a workqueue to prevent
+ * use GFP_ATOMIC to allocate a lot of vmemmap pages.
+ *
+ * update_hpage_vmemmap_workfn() locklessly retrieves the linked list of
+ * pages to be freed and frees them one-by-one. As the page->mapping pointer
+ * is going to be cleared in update_hpage_vmemmap_workfn() anyway, it is
+ * reused as the llist_node structure of a lockless linked list of huge
+ * pages to be freed.
+ */
+static LLIST_HEAD(hpage_update_freelist);
+
+static void update_hpage_vmemmap_workfn(struct work_struct *work)
 {
-	int i;
+	struct llist_node *node;
+	struct page *page;
 
+	node = llist_del_all(&hpage_update_freelist);
+
+	while (node) {
+		page = container_of((struct address_space **)node,
+				     struct page, mapping);
+		node = node->next;
+		page->mapping = NULL;
+		__free_hugepage(page_hstate(page), page);
+
+		cond_resched();
+	}
+}
+static DECLARE_WORK(hpage_update_work, update_hpage_vmemmap_workfn);
+
+static inline void __update_and_free_page(struct hstate *h, struct page *page)
+{
+	/* No need to allocate vmemmap pages */
+	if (!free_vmemmap_pages_per_hpage(h)) {
+		__free_hugepage(h, page);
+		return;
+	}
+
+	/*
+	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap
+	 * pages.
+	 *
+	 * Only call schedule_work() if hpage_update_freelist is previously
+	 * empty. Otherwise, schedule_work() had been called but the workfn
+	 * hasn't retrieved the list yet.
+	 */
+	if (llist_add((struct llist_node *)&page->mapping,
+		      &hpage_update_freelist))
+		schedule_work(&hpage_update_work);
+}
+
+static void update_and_free_page(struct hstate *h, struct page *page)
+{
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
 
 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[page_to_nid(page)]--;
+
+	__update_and_free_page(h, page);
+}
+
+/*
+ * This is where the call to allocate vmemmmap pages will be inserted.
+ */
+static void __free_hugepage(struct hstate *h, struct page *page)
+{
+	int i;
+
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
@@ -1313,13 +1377,17 @@  static void update_and_free_page(struct hstate *h, struct page *page)
 	set_page_refcounted(page);
 	if (hstate_is_gigantic(h)) {
 		/*
-		 * Temporarily drop the hugetlb_lock, because
-		 * we might block in free_gigantic_page().
+		 * Temporarily drop the hugetlb_lock only when this type of
+		 * HugeTLB page does not support vmemmap optimization (which
+		 * context do not hold the hugetlb_lock), because we might
+		 * block in free_gigantic_page().
 		 */
-		spin_unlock(&hugetlb_lock);
+		if (!free_vmemmap_pages_per_hpage(h))
+			spin_unlock(&hugetlb_lock);
 		destroy_compound_gigantic_page(page, huge_page_order(h));
 		free_gigantic_page(page, huge_page_order(h));
-		spin_lock(&hugetlb_lock);
+		if (!free_vmemmap_pages_per_hpage(h))
+			spin_lock(&hugetlb_lock);
 	} else {
 		__free_pages(page, huge_page_order(h));
 	}
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 5cf7b6122c86..c4bbca270453 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -178,18 +178,6 @@ 
 #define RESERVE_VMEMMAP_NR		2U
 #define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 
-/*
- * How many vmemmap pages associated with a HugeTLB page that can be freed
- * to the buddy allocator.
- *
- * Todo: Returns zero for now, which means the feature is disabled. We will
- * enable it once all the infrastructure is there.
- */
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
-{
-	return 0;
-}
-
 static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
 {
 	return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 6923f03534d5..01f8637adbe0 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -12,9 +12,26 @@ 
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
+
+/*
+ * How many vmemmap pages associated with a HugeTLB page that can be freed
+ * to the buddy allocator.
+ *
+ * Todo: Returns zero for now, which means the feature is disabled. We will
+ * enable it once all the infrastructure is there.
+ */
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+	return 0;
+}
 #else
 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
+
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+	return 0;
+}
 #endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */