
[2/5] mm: hugetlb: introduce helpers to preallocate page tables from bootmem allocator

Message ID 20210609121310.62229-3-songmuchun@bytedance.com (mailing list archive)
State New, archived
Series: Split huge PMD mapping of vmemmap pages

Commit Message

Muchun Song June 9, 2021, 12:13 p.m. UTC
If we want to split the huge PMD mapping of the vmemmap pages
associated with each gigantic page allocated from the bootmem
allocator, we need to preallocate the corresponding page tables from
the bootmem allocator as well. This patch introduces helpers to
preallocate those page tables for gigantic pages.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/hugetlb.h |  3 +++
 mm/hugetlb_vmemmap.c    | 63 +++++++++++++++++++++++++++++++++++++++++++++++++
 mm/hugetlb_vmemmap.h    | 13 ++++++++++
 3 files changed, 79 insertions(+)
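
For reference, a minimal sketch of how these helpers are meant to be
used from the boot-time hugetlb path (the real call sites are wired up
by a later patch in this series; the function names and bodies below
are illustrative assumptions, not part of this patch):

	/*
	 * Example only: after all boot-time gigantic pages have been
	 * allocated, preallocate their vmemmap page tables. Pages whose
	 * preallocation fails have already been returned to the bootmem
	 * allocator by the helper itself.
	 */
	static void __init example_prealloc_vmemmap_pgtables(void)
	{
		unsigned long nr_dropped = gigantic_vmemmap_pgtable_prealloc();

		if (nr_dropped)
			pr_warn("HugeTLB: dropped %lu boot-time gigantic pages\n",
				nr_dropped);
	}

	/*
	 * Example only: when a boot-time gigantic page is gathered into
	 * its hstate, hand its preallocated page tables over to the head
	 * page before the page is prepared for use.
	 */
	static void __init example_gather_one(struct huge_bootmem_page *m)
	{
		struct page *head = virt_to_page(m);

		gigantic_vmemmap_pgtable_init(m, head);
		/* ... followed by prep_compound_gigantic_page() etc. ... */
	}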

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 03ca83db0a3e..c27a299c4211 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -622,6 +622,9 @@  struct hstate {
 struct huge_bootmem_page {
 	struct list_head list;
 	struct hstate *hstate;
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+	pte_t *vmemmap_pte;
+#endif
 };
 
 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 628e2752714f..6f3a47b4ebd3 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -171,6 +171,7 @@ 
 #define pr_fmt(fmt)	"HugeTLB: " fmt
 
 #include <linux/list.h>
+#include <linux/memblock.h>
 #include <asm/pgalloc.h>
 
 #include "hugetlb_vmemmap.h"
@@ -263,6 +264,68 @@  int vmemmap_pgtable_prealloc(struct hstate *h, struct list_head *pgtables)
 	return -ENOMEM;
 }
 
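+/*
+ * Preallocate the vmemmap page tables for every boot-time gigantic page
+ * on huge_boot_pages. A gigantic page whose preallocation fails is
+ * removed from the list and returned to the bootmem allocator; the
+ * number of such dropped pages is returned.
+ */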
+unsigned long __init gigantic_vmemmap_pgtable_prealloc(void)
+{
+	struct huge_bootmem_page *m, *tmp;
+	unsigned long nr_free = 0;
+
+	list_for_each_entry_safe(m, tmp, &huge_boot_pages, list) {
+		struct hstate *h = m->hstate;
+		unsigned int nr = pgtable_pages_to_prealloc_per_hpage(h);
+		unsigned long size;
+
+		if (!nr)
+			continue;
+
+		size = nr << PAGE_SHIFT;
+		m->vmemmap_pte = memblock_alloc_try_nid(size, PAGE_SIZE, 0,
+							MEMBLOCK_ALLOC_ACCESSIBLE,
+							NUMA_NO_NODE);
+		if (!m->vmemmap_pte) {
+			nr_free++;
+			list_del(&m->list);
+			memblock_free_early(__pa(m), huge_page_size(h));
+		}
+	}
+
+	return nr_free;
+}
+
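+/*
+ * Hand the page tables preallocated by gigantic_vmemmap_pgtable_prealloc()
+ * over to the head page of a boot-time gigantic page so that they can be
+ * consumed later, when its vmemmap PMD mapping is split.
+ */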
+void __init gigantic_vmemmap_pgtable_init(struct huge_bootmem_page *m,
+					  struct page *head)
+{
+	struct hstate *h = m->hstate;
+	unsigned long pte = (unsigned long)m->vmemmap_pte;
+	unsigned int nr = pgtable_pages_to_prealloc_per_hpage(h);
+
+	if (!nr)
+		return;
+
+	/*
+	 * If we had gigantic hugepages allocated at boot time, we need
+	 * to restore the 'stolen' pages to totalram_pages in order to
+	 * fix confusing memory reports from free(1) and other
+	 * side effects, like CommitLimit going negative.
+	 */
+	adjust_managed_page_count(head, nr);
+
+	/*
+	 * Use the huge page lru list to temporarily store the preallocated
+	 * pages. The preallocated pages are used and the list is emptied
+	 * before the huge page is put into use. When the huge page is put
+	 * into use by prep_new_huge_page() the list will be reinitialized.
+	 */
+	INIT_LIST_HEAD(&head->lru);
+
+	while (nr--) {
+		struct page *pte_page = virt_to_page(pte);
+
+		__ClearPageReserved(pte_page);
+		list_add(&pte_page->lru, &head->lru);
+		pte += PAGE_SIZE;
+	}
+}
+
 /*
  * Previously discarded vmemmap pages will be allocated and remapped
  * after this function returns zero.
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 306e15519da1..f6170720f183 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -16,6 +16,9 @@  void free_huge_page_vmemmap(struct hstate *h, struct page *head);
 void hugetlb_vmemmap_init(struct hstate *h);
 int vmemmap_pgtable_prealloc(struct hstate *h, struct list_head *pgtables);
 void vmemmap_pgtable_free(struct list_head *pgtables);
+unsigned long gigantic_vmemmap_pgtable_prealloc(void);
+void gigantic_vmemmap_pgtable_init(struct huge_bootmem_page *m,
+				   struct page *head);
 
 /*
  * How many vmemmap pages associated with a HugeTLB page that can be freed
@@ -45,6 +48,16 @@  static inline void vmemmap_pgtable_free(struct list_head *pgtables)
 {
 }
 
+static inline unsigned long gigantic_vmemmap_pgtable_prealloc(void)
+{
+	return 0;
+}
+
+static inline void gigantic_vmemmap_pgtable_init(struct huge_bootmem_page *m,
+						 struct page *head)
+{
+}
+
 static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
 }
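
Both helpers rely on pgtable_pages_to_prealloc_per_hpage(), which is
introduced by an earlier patch in this series. Roughly, it reports how
many PTE-level page table pages are needed to split the PMD-mapped
vmemmap of one huge page, and zero when none of the hstate's vmemmap
pages can be freed. The following is only a hedged sketch of that
calculation under those assumptions, not the exact helper from the
series:

	/* Sketch only; see the earlier patch for the real helper. */
	static inline unsigned int pgtable_pages_to_prealloc_per_hpage(struct hstate *h)
	{
		/* Size of the struct pages (vmemmap) describing one huge page. */
		unsigned long vmemmap_size = pages_per_huge_page(h) * sizeof(struct page);

		/* Nothing to split if no vmemmap pages can be freed. */
		if (!free_vmemmap_pages_per_hpage(h))
			return 0;

		/* One page of PTEs covers one PMD-sized chunk of the vmemmap. */
		return ALIGN(vmemmap_size, PMD_SIZE) >> PMD_SHIFT;
	}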