@@ -506,6 +506,9 @@ struct hstate {
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+	pte_t *vmemmap_pte;	/* pte pages preallocated from bootmem for remapping this page's vmemmap */
+#endif
};
struct page *alloc_huge_page(struct vm_area_struct *vma,
@@ -2607,6 +2607,7 @@ static void __init gather_bootmem_prealloc(void)
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		WARN_ON(PageReserved(page));
+		gather_vmemmap_pgtable_init(m, page);	/* hand the preallocated pte pages to this huge page */
		prep_new_huge_page(h, page, page_to_nid(page));
		put_page(page); /* free it into the hugepage allocator */
@@ -2659,6 +2660,10 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
			break;
		cond_resched();
	}
+
+	if (hstate_is_gigantic(h))
+		i -= gather_vmemmap_pgtable_prealloc();	/* discount huge pages released on pte prealloc failure */
+
	if (i < h->max_huge_pages) {
		char buf[32];
@@ -103,6 +103,7 @@
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/bootmem_info.h>
+#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include "hugetlb_vmemmap.h"
@@ -204,6 +205,65 @@ int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page)
return -ENOMEM;
}
+unsigned long __init gather_vmemmap_pgtable_prealloc(void)
+{
+	struct huge_bootmem_page *m, *tmp;
+	unsigned long nr_free = 0;	/* huge pages given back to memblock on pte alloc failure */
+
+	list_for_each_entry_safe(m, tmp, &huge_boot_pages, list) {
+		struct hstate *h = m->hstate;
+		unsigned int nr = pgtable_pages_to_prealloc_per_hpage(h);	/* 0 if vmemmap freeing is off for h */
+		unsigned int pgtable_size;
+
+		if (!nr)
+			continue;
+
+		pgtable_size = nr << PAGE_SHIFT;	/* nr pte pages, one PAGE_SIZE page each */
+		m->vmemmap_pte = memblock_alloc_try_nid(pgtable_size,
+				PAGE_SIZE, 0, MEMBLOCK_ALLOC_ACCESSIBLE,
+				NUMA_NO_NODE);
+		if (!m->vmemmap_pte) {
+			nr_free++;	/* count the huge page we are about to drop */
+			list_del(&m->list);
+			memblock_free_early(__pa(m), huge_page_size(h));	/* 'm' sits at the start of the huge page itself */
+		}
+	}
+
+	return nr_free;
+}
+
+void __init gather_vmemmap_pgtable_init(struct huge_bootmem_page *m,
+					struct page *page)
+{
+	struct hstate *h = m->hstate;
+	unsigned long pte = (unsigned long)m->vmemmap_pte;
+	unsigned int nr = pgtable_pages_to_prealloc_per_hpage(h);
+
+	/*
+	 * Use the huge page lru list to temporarily store the preallocated
+	 * pages. The preallocated pages are used and the list is emptied
+	 * before the huge page is put into use. When the huge page is put
+	 * into use by prep_new_huge_page() the list will be reinitialized.
+	 */
+	INIT_LIST_HEAD(&page->lru);
+
+	while (nr--) {
+		struct page *pte_page = virt_to_page(pte);
+
+		__ClearPageReserved(pte_page);
+		list_add(&pte_page->lru, &page->lru);
+		pte += PAGE_SIZE;
+	}
+
+	/*
+	 * Restore the 'stolen' pte pages to totalram_pages to fix
+	 * confusing memory reports from free(1) and side-effects like
+	 * CommitLimit going negative. Note: 'nr' was consumed by the
+	 * loop above (it underflowed to UINT_MAX), so recompute the count.
+	 */
+	adjust_managed_page_count(page, pgtable_pages_to_prealloc_per_hpage(h));
+}
+
/*
* Walk a vmemmap address to the pmd it maps.
*/
@@ -14,6 +14,9 @@
void __init hugetlb_vmemmap_init(struct hstate *h);
int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page);
void vmemmap_pgtable_free(struct page *page);
+unsigned long __init gather_vmemmap_pgtable_prealloc(void);	/* returns nr of boot huge pages freed on failure */
+void __init gather_vmemmap_pgtable_init(struct huge_bootmem_page *m,
+					struct page *page);
void alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
void free_huge_page_vmemmap(struct hstate *h, struct page *head);
@@ -35,6 +38,16 @@ static inline void vmemmap_pgtable_free(struct page *page)
{
}
+static inline unsigned long gather_vmemmap_pgtable_prealloc(void)
+{
+	return 0;	/* nothing to prealloc/free when vmemmap freeing is not configured */
+}
+
+static inline void gather_vmemmap_pgtable_init(struct huge_bootmem_page *m,
+					       struct page *page)
+{
+}
+
static inline void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
{
}
The gigantic page is allocated from bootmem. If we want to free the unused vmemmap pages associated with it, we also need page tables for the remapping, so we preallocate those page tables from bootmem as well. Signed-off-by: Muchun Song <songmuchun@bytedance.com> --- include/linux/hugetlb.h | 3 +++ mm/hugetlb.c | 5 +++++ mm/hugetlb_vmemmap.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++ mm/hugetlb_vmemmap.h | 13 +++++++++++ 4 files changed, 81 insertions(+)