@@ -46,6 +46,9 @@ extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
bool no_warn);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+extern bool cma_clear_bitmap_if_in_range(struct cma *cma, const struct page *page,
+ unsigned int count);
+
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern void cma_reserve(int min_order, unsigned long requested_size,
@@ -24,6 +24,8 @@ extern struct page *follow_trans_huge_pud(struct vm_area_struct *vma,
unsigned long addr,
pud_t *pud,
unsigned int flags);
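+/* Helpers for allocating and freeing PUD-sized THP pages from CMA. */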
+extern struct page *alloc_thp_pud_page(int nid);
+extern bool free_thp_pud_page(struct page *page, int order);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
@@ -43,6 +45,14 @@ struct page *follow_trans_huge_pud(struct vm_area_struct *vma,
{
return NULL;
}
+static inline struct page *alloc_thp_pud_page(int nid)
+{
+	return NULL;
+}
+
+static inline bool free_thp_pud_page(struct page *page, int order)
+{
+	return false;
+}
#endif
extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
@@ -532,6 +532,37 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
return true;
}
+/**
+ * cma_clear_bitmap_if_in_range() - clear the bitmap for a range of pages
+ * @cma: Contiguous memory region the pages were allocated from.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function clears the allocation bitmap covering memory obtained from
+ * cma_alloc(), without freeing the pages themselves. It returns false when
+ * the provided pages do not lie entirely within the contiguous area, and
+ * true otherwise.
+ */
+bool cma_clear_bitmap_if_in_range(struct cma *cma, const struct page *pages,
+ unsigned int count)
+{
+ unsigned long pfn;
+
+ if (!cma || !pages)
+ return false;
+
+ pfn = page_to_pfn(pages);
+
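+	/* The whole [pfn, pfn + count) range must lie inside this CMA area. */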
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ if (pfn + count > cma->base_pfn + cma->count)
+ return false;
+
+ cma_clear_bitmap(cma, pfn, count);
+
+ return true;
+}
+
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
int i;
@@ -33,6 +33,7 @@
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
+#include <linux/cma.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -64,6 +65,10 @@ static struct shrinker deferred_split_shrinker;
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
+/*
+ * Per-node CMA areas backing PUD-sized THP allocations. Declared without
+ * an #ifdef so the IS_ENABLED(CONFIG_CMA) user below still compiles when
+ * CMA is disabled.
+ */
+extern struct cma *hugepage_cma[MAX_NUMNODES];
+
bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
/* The addr is used to check if the vma size fits */
@@ -2526,6 +2531,13 @@ static void __split_huge_pud_page(struct page *page, struct list_head *list,
/* no file-back page support yet */
VM_BUG_ON(!PageAnon(page));
+	/*
+	 * Release the hugepage CMA bitmap covering this page before
+	 * splitting it into PMD-sized pages.
+	 */
+	if (IS_ENABLED(CONFIG_CMA)) {
+		struct cma *cma = hugepage_cma[page_to_nid(head)];
+		bool cleared = cma_clear_bitmap_if_in_range(cma, head,
+						thp_nr_pages(head));
+
+		VM_BUG_ON(!cleared);
+	}
+
for (i = HPAGE_PUD_NR - HPAGE_PMD_NR; i >= 1; i -= HPAGE_PMD_NR) {
__split_huge_pud_page_tail(head, i, lruvec, list);
}
@@ -3753,3 +3765,21 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
#endif
+
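+/*
+ * Try to allocate a PUD-sized THP from the per-node hugepage CMA area.
+ * Returns NULL when CMA is disabled or the CMA allocation fails, so the
+ * caller can fall back to alloc_contig_pages().
+ */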
+struct page *alloc_thp_pud_page(int nid)
+{
+ struct page *page = NULL;
+#ifdef CONFIG_CMA
+ page = cma_alloc(hugepage_cma[nid], HPAGE_PUD_NR, HPAGE_PUD_ORDER, true);
+#endif
+ return page;
+}
+
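+/*
+ * Return a PUD-sized THP to its CMA area. Returns false when the page was
+ * not allocated from CMA, letting the caller free it some other way.
+ */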
+bool free_thp_pud_page(struct page *page, int order)
+{
+ bool ret = false;
+#ifdef CONFIG_CMA
+	ret = cma_release(hugepage_cma[page_to_nid(page)], page, 1 << order);
+#endif
+ return ret;
+}
@@ -2139,7 +2139,10 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
struct page *page;
if (order > MAX_ORDER) {
- page = alloc_contig_pages(1UL<<order, gfp, nid, NULL);
+		/* Try the hugepage CMA area first. */
+		page = NULL;
+		if (order == HPAGE_PUD_ORDER)
+			page = alloc_thp_pud_page(nid);
+		if (!page)
+			page = alloc_contig_pages(1UL << order, gfp, nid, NULL);
if (page && (gfp & __GFP_COMP))
prep_compound_page(page, order);
} else
@@ -2219,8 +2222,11 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
mpol_cond_put(pol);
if (order > MAX_ORDER) {
- page = alloc_contig_pages(1UL<<order, gfp,
- hpage_node, NULL);
+			/* Try the hugepage CMA area first. */
+			page = NULL;
+			if (order == HPAGE_PUD_ORDER)
+				page = alloc_thp_pud_page(hpage_node);
+			if (!page)
+				page = alloc_contig_pages(1UL << order, gfp,
+							  hpage_node, NULL);
if (page && (gfp & __GFP_COMP))
prep_compound_page(page, order);
goto out;
@@ -1509,7 +1509,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
if (order >= MAX_ORDER) {
destroy_compound_gigantic_page(page, order);
- free_contig_range(page_to_pfn(page), 1 << order);
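+		/* Return CMA-backed pages to CMA, otherwise free the range. */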
+ if (!free_thp_pud_page(page, order))
+ free_contig_range(page_to_pfn(page), 1 << order);
} else {
migratetype = get_pfnblock_migratetype(page, pfn);
local_irq_save(flags);