diff mbox series

[21/45] fs/proc: Create gather_pud_stats to handle PUD-mapped hugetlb pages

Message ID 20240704043132.28501-22-osalvador@suse.de (mailing list archive)
State New
Headers show
Series hugetlb pagewalk unification | expand

Commit Message

Oscar Salvador July 4, 2024, 4:31 a.m. UTC
Normal THP cannot be PUD-mapped (except for devmap), but hugetlb can, so create
gather_pud_stats() in order to handle PUD-mapped hugetlb vmas.
Also implement can_gather_numa_stats_pud(), which is the PUD version of
can_gather_numa_stats_pmd().

Signed-off-by: Oscar Salvador <osalvador@suse.de>
---
 arch/arm64/include/asm/pgtable.h |  1 +
 fs/proc/task_mmu.c               | 56 ++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5e26e63b1012..1a6b8be2f0d0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -590,6 +590,7 @@  static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 #define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
+#define pud_dirty(pud)		pte_dirty(pud_pte(pud))
 #define pud_young(pud)		pte_young(pud_pte(pud))
 #define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
 #define pud_write(pud)		pte_write(pud_pte(pud))
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 98dd03c26e68..5df17b7cfe6c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -3141,6 +3141,61 @@  static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
 	return page;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static struct page *can_gather_numa_stats_pud(pud_t pud,
+					      struct vm_area_struct *vma,
+					      unsigned long addr)
+{
+	struct page *page;
+	int nid;
+
+	if (!pud_present(pud))
+		return NULL;
+
+	page = pud_page(pud);
+	if (!page)
+		return NULL;
+
+	if (PageReserved(page))
+		return NULL;
+
+	nid = page_to_nid(page);
+	if (!node_isset(nid, node_states[N_MEMORY]))
+		return NULL;
+
+	return page;
+}
+
+static int gather_pud_stats(pud_t *pud, unsigned long addr,
+			    unsigned long end, struct mm_walk *walk)
+{
+	spinlock_t *ptl;
+	struct page *page;
+	unsigned long nr_pages;
+	struct numa_maps *md = walk->private;
+	struct vm_area_struct *vma = walk->vma;
+
+	ptl = pud_huge_lock(pud, vma);
+	if (!ptl)
+		return 0;
+
+	if (is_vm_hugetlb_page(vma))
+		nr_pages = 1;
+	else
+		nr_pages = HPAGE_PUD_SIZE / PAGE_SIZE;
+
+	page = can_gather_numa_stats_pud(*pud, vma, addr);
+	if (page)
+		gather_stats(page, md, pud_dirty(*pud),
+			     nr_pages);
+
+	spin_unlock(ptl);
+	return 0;
+}
+#else
+#define gather_pud_stats	NULL
+#endif
+
 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
 					      struct vm_area_struct *vma,
@@ -3245,6 +3300,7 @@  static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
 
 static const struct mm_walk_ops show_numa_ops = {
 	.hugetlb_entry = gather_hugetlb_stats,
+	.pud_entry = gather_pud_stats,
 	.pmd_entry = gather_pte_stats,
 	.walk_lock = PGWALK_RDLOCK,
 };