[RFC,17/31] mm: thp: split properly PMD-mapped PUD THP to PTE-mapped PUD THP.

Message ID 20190215220856.29749-18-zi.yan@sent.com (mailing list archive)
State New, archived
Series Generating physically contiguous memory after page allocation

Commit Message

Zi Yan Feb. 15, 2019, 10:08 p.m. UTC
From: Zi Yan <ziy@nvidia.com>

The page count increase needs to go to the head of the PUD page.

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 mm/huge_memory.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
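
Why the head page: with PUD THPs in this series, the page a PMD entry points at can be a tail page of the PUD-order compound page, and compound pages keep their reference count on the head page only. Below is a minimal userspace sketch of that invariant, not kernel code; the names (struct toy_page, toy_ref_add) and the x86_64-style values used for HPAGE_PMD_NR/HPAGE_PUD_NR are assumptions made purely for illustration.

#include <assert.h>
#include <stdio.h>

#define HPAGE_PMD_NR	512		/* base pages per PMD mapping (assumed x86_64, 4KB pages) */
#define HPAGE_PUD_NR	(512 * 512)	/* base pages per PUD THP (assumed x86_64, 4KB pages) */

/* Hypothetical stand-in for struct page: refcount is meaningful on the head only. */
struct toy_page {
	int refcount;			/* tracked on the head page only */
	struct toy_page *compound_head;	/* tail pages point back to their head */
};

/* Charge references to the compound head, the way page_ref_add() must for tail pages. */
static void toy_ref_add(struct toy_page *page, int nr)
{
	page->compound_head->refcount += nr;
}

int main(void)
{
	static struct toy_page pud_thp[HPAGE_PUD_NR];
	int i;

	/* Build the compound group: every page points at the head page. */
	for (i = 0; i < HPAGE_PUD_NR; i++)
		pud_thp[i].compound_head = &pud_thp[0];
	pud_thp[0].refcount = 1;	/* initial reference held on the head */

	/*
	 * A PMD inside the PUD THP maps a run of tail pages; splitting it to
	 * PTEs adds HPAGE_PMD_NR - 1 references, and they all have to land
	 * on the head of the PUD THP, not on the tail page the PMD maps.
	 */
	struct toy_page *pmd_page = &pud_thp[3 * HPAGE_PMD_NR];
	toy_ref_add(pmd_page, HPAGE_PMD_NR - 1);

	assert(pud_thp[0].refcount == HPAGE_PMD_NR);
	assert(pmd_page->refcount == 0);	/* the tail page itself holds no refs */
	printf("head refcount after PMD split: %d\n", pud_thp[0].refcount);
	return 0;
}

In the same spirit, the hunk below charges HPAGE_PMD_NR - 1 references to compound_head(page), so the count lands on the PUD THP's head rather than on the tail page the PMD maps.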

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5f83f4c5eac7..bbdbc9ae06bf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3198,7 +3198,7 @@  static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long haddr, bool freeze)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct page *page;
+	struct page *page, *head;
 	pgtable_t pgtable;
 	pmd_t old_pmd, _pmd;
 	bool young, write, soft_dirty, pmd_migration = false;
@@ -3285,7 +3285,8 @@  static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		soft_dirty = pmd_soft_dirty(old_pmd);
 	}
 	VM_BUG_ON_PAGE(!page_count(page), page);
-	page_ref_add(page, HPAGE_PMD_NR - 1);
+	head = compound_head(page);
+	page_ref_add(head, HPAGE_PMD_NR - 1);
 
 	/*
 	 * Withdraw the table only after we mark the pmd entry invalid.