[RFC,21/24] mm/hugetlb: Merge pte to huge pmd only for gigantic page

Message ID 20200915125947.26204-22-songmuchun@bytedance.com (mailing list archive)
State New, archived
Series mm/hugetlb: Free some vmemmap pages of hugetlb page

Commit Message

Muchun Song Sept. 15, 2020, 12:59 p.m. UTC
Merge ptes back to a huge pmd if the pmd has ever been split. For now,
only gigantic pages whose vmemmap size is an integer multiple of
PMD_SIZE are supported; this is the simplest case to handle.
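
As a concrete example, assuming x86-64 with 4KB base pages and a
64-byte struct page: a 1GB gigantic page is described by 262144
struct pages, i.e. 16MB of vmemmap, which is exactly eight 2MB PMD
mappings. A 2MB hugetlb page, by contrast, needs only 32KB of
vmemmap, which is not a multiple of PMD_SIZE and so is not handled
here.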

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/hugetlb.h |   7 +++
 mm/hugetlb.c            | 104 +++++++++++++++++++++++++++++++++++++++-
 2 files changed, 109 insertions(+), 2 deletions(-)

Comments

Muchun Song Sept. 20, 2020, 9:59 a.m. UTC | #1
On Tue, Sep 15, 2020 at 9:03 PM Muchun Song <songmuchun@bytedance.com> wrote:
>
> Merge ptes back to a huge pmd if the pmd has ever been split. For now,
> only gigantic pages whose vmemmap size is an integer multiple of
> PMD_SIZE are supported; this is the simplest case to handle.
>
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> ---
>  include/linux/hugetlb.h |   7 +++
>  mm/hugetlb.c            | 104 +++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 109 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index e3aa192f1c39..c56df0da7ae5 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -611,6 +611,13 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
>  }
>  #endif
>
> +#ifndef vmemmap_pmd_mkhuge
> +static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
> +{
> +       return pmd_mkhuge(mk_pmd(page, PAGE_KERNEL));
> +}
> +#endif
> +
>  #ifndef VMEMMAP_HPAGE_SHIFT
>  #define VMEMMAP_HPAGE_SHIFT            PMD_SHIFT
>  #endif
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 28c154679838..3ca36e259b4e 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1759,6 +1759,62 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
>         }
>  }
>
> +static void __replace_huge_page_pte_vmemmap(pte_t *ptep, unsigned long start,
> +                                           unsigned int nr, struct page *huge,
> +                                           struct list_head *free_pages)
> +{
> +       unsigned long addr;
> +       unsigned long end = start + (nr << PAGE_SHIFT);
> +
> +       for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
> +               struct page *page;
> +               pte_t old = *ptep;
> +               pte_t entry;
> +
> +               prepare_vmemmap_page(huge);
> +
> +               entry = mk_pte(huge++, PAGE_KERNEL);
> +               VM_WARN_ON(!pte_present(old));
> +               page = pte_page(old);
> +               list_add(&page->lru, free_pages);
> +
> +               set_pte_at(&init_mm, addr, ptep, entry);
> +       }
> +}
> +
> +static void replace_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
> +                                         struct page *huge,
> +                                         struct list_head *free_pages)
> +{
> +       unsigned long end = start + VMEMMAP_HPAGE_SIZE;
> +
> +       flush_cache_vunmap(start, end);
> +       __replace_huge_page_pte_vmemmap(pte_offset_kernel(pmd, start), start,
> +                                       VMEMMAP_HPAGE_NR, huge, free_pages);
> +       flush_tlb_kernel_range(start, end);
> +}
> +
> +static pte_t *merge_vmemmap_pte(pmd_t *pmdp, unsigned long addr)
> +{
> +       pte_t *pte;
> +       struct page *page;
> +
> +       pte = pte_offset_kernel(pmdp, addr);
> +       page = pte_page(*pte);
> +       set_pmd(pmdp, vmemmap_pmd_mkhuge(page));
> +
> +       return pte;
> +}
> +
> +static void merge_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
> +                                       struct page *huge,
> +                                       struct list_head *free_pages)
> +{
> +       replace_huge_page_pmd_vmemmap(pmd, start, huge, free_pages);
> +       pte_free_kernel(&init_mm, merge_vmemmap_pte(pmd, start));
> +       flush_tlb_kernel_range(start, start + VMEMMAP_HPAGE_SIZE);
> +}
> +
>  static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
>  {
>         int i;
> @@ -1772,6 +1828,15 @@ static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
>         }
>  }
>
> +static inline void dissolve_compound_page(struct page *page, unsigned int order)
> +{
> +       int i;
> +       unsigned int nr_pages = 1 << order;
> +
> +       for (i = 1; i < nr_pages; i++)
> +               set_page_refcounted(page + i);
> +}
> +
>  static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
>  {
>         pmd_t *pmd;
> @@ -1791,10 +1856,45 @@ static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
>                                     __remap_huge_page_pte_vmemmap);
>         if (!freed_vmemmap_hpage_dec(pmd_page(*pmd)) && pmd_split(pmd)) {
>                 /*
> -                * Todo:
> -                * Merge pte to huge pmd if it has ever been split.
> +                * Merge ptes back to a huge pmd if the pmd has ever been
> +                * split. For now this only supports gigantic pages whose
> +                * vmemmap size is an integer multiple of PMD_SIZE, the
> +                * simplest case to handle.
>                  */
>                 clear_pmd_split(pmd);
> +
> +               if (IS_ALIGNED(nr_vmemmap(h), VMEMMAP_HPAGE_NR)) {
> +                       unsigned long addr = (unsigned long)head;
> +                       unsigned long end = addr + nr_vmemmap_size(h);
> +
> +                       spin_unlock(ptl);
> +
> +                       for (; addr < end; addr += VMEMMAP_HPAGE_SIZE) {
> +                               void *to;
> +                               struct page *page;
> +
> +                               page = alloc_pages(GFP_VMEMMAP_PAGE & ~__GFP_NOFAIL,
> +                                                  VMEMMAP_HPAGE_ORDER);
> +                               if (!page)
> +                                       goto out;

I forgot to call dissolve_compound_page() here:

+                               dissolve_compound_page(page,
+                                                      VMEMMAP_HPAGE_ORDER);
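
Without it, only the head page of the order-VMEMMAP_HPAGE_ORDER
allocation carries a reference count; the tail pages are left at zero
and could not be freed page by page later when the vmemmap of this
huge page is split and freed again. For clarity, the corrected loop
body would read:

+                               page = alloc_pages(GFP_VMEMMAP_PAGE & ~__GFP_NOFAIL,
+                                                  VMEMMAP_HPAGE_ORDER);
+                               if (!page)
+                                       goto out;
+                               /* Give each tail page its own refcount. */
+                               dissolve_compound_page(page,
+                                                      VMEMMAP_HPAGE_ORDER);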

> +
> +                               to = page_to_virt(page);
> +                               memcpy(to, (void *)addr, VMEMMAP_HPAGE_SIZE);
> +
> +                               /*
> +                                * Make sure that any data written to @to is
> +                                * made visible to the physical page.
> +                                */
> +                               flush_kernel_vmap_range(to, VMEMMAP_HPAGE_SIZE);
> +
> +                               merge_huge_page_pmd_vmemmap(pmd++, addr, page,
> +                                                           &remap_pages);
> +                       }
> +
> +out:
> +                       free_vmemmap_page_list(&remap_pages);
> +                       return;
> +               }
>         }
>         spin_unlock(ptl);
>  }
> --
> 2.20.1
>

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e3aa192f1c39..c56df0da7ae5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -611,6 +611,13 @@  static inline bool vmemmap_pmd_huge(pmd_t *pmd)
 }
 #endif
 
+#ifndef vmemmap_pmd_mkhuge
+static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
+{
+	return pmd_mkhuge(mk_pmd(page, PAGE_KERNEL));
+}
+#endif
+
 #ifndef VMEMMAP_HPAGE_SHIFT
 #define VMEMMAP_HPAGE_SHIFT		PMD_SHIFT
 #endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 28c154679838..3ca36e259b4e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1759,6 +1759,62 @@  static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 	}
 }
 
+static void __replace_huge_page_pte_vmemmap(pte_t *ptep, unsigned long start,
+					    unsigned int nr, struct page *huge,
+					    struct list_head *free_pages)
+{
+	unsigned long addr;
+	unsigned long end = start + (nr << PAGE_SHIFT);
+
+	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
+		struct page *page;
+		pte_t old = *ptep;
+		pte_t entry;
+
+		prepare_vmemmap_page(huge);
+
+		entry = mk_pte(huge++, PAGE_KERNEL);
+		VM_WARN_ON(!pte_present(old));
+		page = pte_page(old);
+		list_add(&page->lru, free_pages);
+
+		set_pte_at(&init_mm, addr, ptep, entry);
+	}
+}
+
+static void replace_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+					  struct page *huge,
+					  struct list_head *free_pages)
+{
+	unsigned long end = start + VMEMMAP_HPAGE_SIZE;
+
+	flush_cache_vunmap(start, end);
+	__replace_huge_page_pte_vmemmap(pte_offset_kernel(pmd, start), start,
+					VMEMMAP_HPAGE_NR, huge, free_pages);
+	flush_tlb_kernel_range(start, end);
+}
+
+static pte_t *merge_vmemmap_pte(pmd_t *pmdp, unsigned long addr)
+{
+	pte_t *pte;
+	struct page *page;
+
+	pte = pte_offset_kernel(pmdp, addr);
+	page = pte_page(*pte);
+	set_pmd(pmdp, vmemmap_pmd_mkhuge(page));
+
+	return pte;
+}
+
+static void merge_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+					struct page *huge,
+					struct list_head *free_pages)
+{
+	replace_huge_page_pmd_vmemmap(pmd, start, huge, free_pages);
+	pte_free_kernel(&init_mm, merge_vmemmap_pte(pmd, start));
+	flush_tlb_kernel_range(start, start + VMEMMAP_HPAGE_SIZE);
+}
+
 static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 {
 	int i;
@@ -1772,6 +1828,15 @@  static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 	}
 }
 
+static inline void dissolve_compound_page(struct page *page, unsigned int order)
+{
+	int i;
+	unsigned int nr_pages = 1 << order;
+
+	for (i = 1; i < nr_pages; i++)
+		set_page_refcounted(page + i);
+}
+
 static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 	pmd_t *pmd;
@@ -1791,10 +1856,45 @@  static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 				    __remap_huge_page_pte_vmemmap);
 	if (!freed_vmemmap_hpage_dec(pmd_page(*pmd)) && pmd_split(pmd)) {
 		/*
-		 * Todo:
-		 * Merge pte to huge pmd if it has ever been split.
+		 * Merge ptes back to a huge pmd if the pmd has ever been
+		 * split. For now this only supports gigantic pages whose
+		 * vmemmap size is an integer multiple of PMD_SIZE, the
+		 * simplest case to handle.
 		 */
 		clear_pmd_split(pmd);
+
+		if (IS_ALIGNED(nr_vmemmap(h), VMEMMAP_HPAGE_NR)) {
+			unsigned long addr = (unsigned long)head;
+			unsigned long end = addr + nr_vmemmap_size(h);
+
+			spin_unlock(ptl);
+
+			for (; addr < end; addr += VMEMMAP_HPAGE_SIZE) {
+				void *to;
+				struct page *page;
+
+				page = alloc_pages(GFP_VMEMMAP_PAGE & ~__GFP_NOFAIL,
+						   VMEMMAP_HPAGE_ORDER);
+				if (!page)
+					goto out;
+
+				to = page_to_virt(page);
+				memcpy(to, (void *)addr, VMEMMAP_HPAGE_SIZE);
+
+				/*
+				 * Make sure that any data written to @to is
+				 * made visible to the physical page.
+				 */
+				flush_kernel_vmap_range(to, VMEMMAP_HPAGE_SIZE);
+
+				merge_huge_page_pmd_vmemmap(pmd++, addr, page,
+							    &remap_pages);
+			}
+
+out:
+			free_vmemmap_page_list(&remap_pages);
+			return;
+		}
 	}
 	spin_unlock(ptl);
 }