@@ -99,6 +99,8 @@
*/
#define pr_fmt(fmt) "HugeTLB Vmemmap: " fmt
+#include <linux/list.h>
+#include <asm/pgalloc.h>
#include "hugetlb_vmemmap.h"
/*
@@ -111,6 +113,80 @@
*/
#define RESERVE_VMEMMAP_NR 2U
+#ifndef VMEMMAP_HPAGE_SHIFT
+#define VMEMMAP_HPAGE_SHIFT HPAGE_SHIFT
+#endif
+#define VMEMMAP_HPAGE_ORDER (VMEMMAP_HPAGE_SHIFT - PAGE_SHIFT)
+#define VMEMMAP_HPAGE_NR (1 << VMEMMAP_HPAGE_ORDER)
+#define VMEMMAP_HPAGE_SIZE (1UL << VMEMMAP_HPAGE_SHIFT)
+#define VMEMMAP_HPAGE_MASK (~(VMEMMAP_HPAGE_SIZE - 1))
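+
+/*
+ * E.g. on x86_64 with 4K base pages, PAGE_SHIFT == 12 and
+ * HPAGE_SHIFT == 21, so VMEMMAP_HPAGE_ORDER is 9, VMEMMAP_HPAGE_NR is
+ * 512 and VMEMMAP_HPAGE_SIZE is 2MB, the granularity at which the
+ * vmemmap is expected to be mapped by huge pages.
+ */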
+
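+/*
+ * The number of vmemmap pages that can be freed for each HugeTLB page
+ * of this hstate, set up by hugetlb_vmemmap_init() at boot.
+ */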
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+	return h->nr_free_vmemmap_pages;
+}
+
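+/*
+ * The total number of vmemmap pages backing a HugeTLB page, including
+ * the RESERVE_VMEMMAP_NR pages that are kept reserved. E.g. a 2MB
+ * HugeTLB page consists of 512 base pages, whose struct pages (64
+ * bytes each on common configurations) occupy 8 vmemmap pages: 2
+ * reserved plus 6 freeable.
+ */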
+static inline unsigned int vmemmap_pages_per_hpage(struct hstate *h)
+{
+	return free_vmemmap_pages_per_hpage(h) + RESERVE_VMEMMAP_NR;
+}
+
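+/* The size, in bytes, of the vmemmap backing a HugeTLB page. */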
+static inline unsigned long vmemmap_pages_size_per_hpage(struct hstate *h)
+{
+	return (unsigned long)vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
+}
+
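+/*
+ * The number of page table (PTE) pages to preallocate so that the
+ * vmemmap of a HugeTLB page can be remapped at PTE granularity: one
+ * per VMEMMAP_HPAGE_SIZE-aligned region covering the vmemmap. For the
+ * 2MB example above, the 8 vmemmap pages (32KB) fit within a single
+ * 2MB region, so one PTE page suffices.
+ */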
+static inline unsigned int pgtable_pages_to_prealloc_per_hpage(struct hstate *h)
+{
+	unsigned long vmemmap_size = vmemmap_pages_size_per_hpage(h);
+
+	/*
+	 * No need to preallocate page tables when there are no vmemmap
+	 * pages to free.
+	 */
+	if (!free_vmemmap_pages_per_hpage(h))
+		return 0;
+
+	return ALIGN(vmemmap_size, VMEMMAP_HPAGE_SIZE) >> VMEMMAP_HPAGE_SHIFT;
+}
+
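+/*
+ * Free any preallocated page table pages still queued on the huge
+ * page's lru list by vmemmap_pgtable_prealloc().
+ */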
+void vmemmap_pgtable_free(struct page *page)
+{
+	struct page *pte_page, *t_page;
+
+	list_for_each_entry_safe(pte_page, t_page, &page->lru, lru) {
+		list_del(&pte_page->lru);
+		pte_free_kernel(&init_mm, page_to_virt(pte_page));
+	}
+}
+
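+/*
+ * Preallocate the page table pages needed to remap the vmemmap of
+ * @page. Returns 0 on success; on allocation failure, frees whatever
+ * was already preallocated and returns -ENOMEM.
+ */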
+int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page)
+{
+	unsigned int nr = pgtable_pages_to_prealloc_per_hpage(h);
+
+	/*
+	 * Use the huge page lru list to temporarily store the preallocated
+	 * page table pages. The pages are consumed and the list emptied
+	 * before the huge page is put into use; prep_new_huge_page() then
+	 * reinitializes the list head.
+	 */
+	INIT_LIST_HEAD(&page->lru);
+
+	while (nr--) {
+		pte_t *pte_p;
+
+		pte_p = pte_alloc_one_kernel(&init_mm);
+		if (!pte_p)
+			goto out;
+		list_add(&virt_to_page(pte_p)->lru, &page->lru);
+	}
+
+	return 0;
+out:
+	vmemmap_pgtable_free(page);
+	return -ENOMEM;
+}
+
void __init hugetlb_vmemmap_init(struct hstate *h)
{
	unsigned int order = huge_page_order(h);
@@ -12,9 +12,20 @@
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
void __init hugetlb_vmemmap_init(struct hstate *h);
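+/*
+ * Callers are expected to preallocate page tables for a freshly
+ * allocated huge page via vmemmap_pgtable_prealloc() and to release
+ * any that remain unconsumed with vmemmap_pgtable_free(); see the
+ * comments in hugetlb_vmemmap.c.
+ */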
+int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page);
+void vmemmap_pgtable_free(struct page *page);
#else
static inline void hugetlb_vmemmap_init(struct hstate *h)
{
}
+
+static inline int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page)
+{
+	return 0;
+}
+
+static inline void vmemmap_pgtable_free(struct page *page)
+{
+}
#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
#endif /* _LINUX_HUGETLB_VMEMMAP_H */