
[05/11] mm/huge_memory: Fix split assumption of page size

Message ID 20200908195539.25896-6-willy@infradead.org
State New
Series: Remove assumptions of THP size

Commit Message

Matthew Wilcox (Oracle) Sept. 8, 2020, 7:55 p.m. UTC
From: "Kirill A. Shutemov" <kirill@shutemov.name>

File THPs may now be of arbitrary size, and we can't rely on that size
after doing the split, so remember the number of pages before we start
the split.

Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/huge_memory.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
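
The reason the count has to be captured up front is that once the compound
page has been split, thp_nr_pages() on the head page reports a single page,
so any loop that re-read the size afterwards would only cover the first
subpage. A minimal sketch of the pattern follows; it is not the kernel code
itself, and do_the_split() and post_split_fixup() are hypothetical stand-ins:

    #include <linux/huge_mm.h>	/* thp_nr_pages() */
    #include <linux/mm_types.h>	/* struct page */

    static void split_example(struct page *head)
    {
    	/* Capture the subpage count while 'head' is still compound. */
    	unsigned int nr = thp_nr_pages(head);
    	unsigned int i;

    	do_the_split(head);	/* hypothetical: clears PageCompound */

    	/* thp_nr_pages(head) now returns 1; use the saved 'nr' instead. */
    	for (i = 0; i < nr; i++)
    		post_split_fixup(head + i);	/* hypothetical per-subpage work */
    }

This mirrors what the patch does in __split_huge_page(): nr is read from
thp_nr_pages(head) before the tail pages are split off, and that saved value
is then passed to split_page_owner(), remap_page(), and the final loop over
the subpages.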

Comments

SeongJae Park Sept. 15, 2020, 7:23 a.m. UTC | #1
On Tue,  8 Sep 2020 20:55:32 +0100 "Matthew Wilcox (Oracle)" <willy@infradead.org> wrote:

> From: "Kirill A. Shutemov" <kirill@shutemov.name>
> 
> File THPs may now be of arbitrary size, and we can't rely on that size
> after doing the split so remember the number of pages before we start
> the split.
> 
> Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: SeongJae Park <sjpark@amazon.de>


Thanks,
SeongJae Park

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a882d770a812..7bf837c32e3f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2302,13 +2302,13 @@  static void unmap_page(struct page *page)
 	VM_BUG_ON_PAGE(!unmap_success, page);
 }
 
-static void remap_page(struct page *page)
+static void remap_page(struct page *page, unsigned int nr)
 {
 	int i;
 	if (PageTransHuge(page)) {
 		remove_migration_ptes(page, page, true);
 	} else {
-		for (i = 0; i < HPAGE_PMD_NR; i++)
+		for (i = 0; i < nr; i++)
 			remove_migration_ptes(page + i, page + i, true);
 	}
 }
@@ -2383,6 +2383,7 @@  static void __split_huge_page(struct page *page, struct list_head *list,
 	struct lruvec *lruvec;
 	struct address_space *swap_cache = NULL;
 	unsigned long offset = 0;
+	unsigned int nr = thp_nr_pages(head);
 	int i;
 
 	lruvec = mem_cgroup_page_lruvec(head, pgdat);
@@ -2398,7 +2399,7 @@  static void __split_huge_page(struct page *page, struct list_head *list,
 		xa_lock(&swap_cache->i_pages);
 	}
 
-	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
+	for (i = nr - 1; i >= 1; i--) {
 		__split_huge_page_tail(head, i, lruvec, list);
 		/* Some pages can be beyond i_size: drop them from page cache */
 		if (head[i].index >= end) {
@@ -2418,7 +2419,7 @@  static void __split_huge_page(struct page *page, struct list_head *list,
 
 	ClearPageCompound(head);
 
-	split_page_owner(head, HPAGE_PMD_NR);
+	split_page_owner(head, nr);
 
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
@@ -2437,9 +2438,9 @@  static void __split_huge_page(struct page *page, struct list_head *list,
 
 	spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 
-	remap_page(head);
+	remap_page(head, nr);
 
-	for (i = 0; i < HPAGE_PMD_NR; i++) {
+	for (i = 0; i < nr; i++) {
 		struct page *subpage = head + i;
 		if (subpage == page)
 			continue;
@@ -2693,7 +2694,7 @@  int split_huge_page_to_list(struct page *page, struct list_head *list)
 fail:		if (mapping)
 			xa_unlock(&mapping->i_pages);
 		spin_unlock_irqrestore(&pgdata->lru_lock, flags);
-		remap_page(head);
+		remap_page(head, thp_nr_pages(head));
 		ret = -EBUSY;
 	}