[v2,5/5] mm/khugepaged: Convert collapse_pte_mapped_thp() to use folios

Message ID 20231018203213.50224-6-vishal.moola@gmail.com (mailing list archive)
State New
Series Some khugepaged folio conversions

Commit Message

Vishal Moola Oct. 18, 2023, 8:32 p.m. UTC
This removes 2 calls to compound_head() and helps convert khugepaged to
use folios throughout.

Previously, if the address passed to collapse_pte_mapped_thp()
corresponded to a tail page, the scan would fail immediately. Using
filemap_lock_folio() we can get the corresponding folio back and try to
operate on the folio instead.
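
A minimal sketch of the two lookup paths (simplified; mapping/index
stand in for vma->vm_file->f_mapping and linear_page_index(vma, haddr),
and error handling is elided):

	/* Before: find_lock_page() returns the exact subpage at index,
	 * so a tail page fails the PageHead() check and the scan bails
	 * out immediately.
	 */
	hpage = find_lock_page(mapping, index);
	if (!PageHead(hpage))
		goto drop_hpage;	/* SCAN_FAIL */

	/* After: filemap_lock_folio() returns the folio that contains
	 * index, so a tail-page address still resolves to its large
	 * folio and the collapse can proceed.
	 */
	folio = filemap_lock_folio(mapping, index);
	if (folio_order(folio) != HPAGE_PMD_ORDER)
		goto drop_folio;	/* SCAN_PAGE_COMPOUND */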

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 mm/khugepaged.c | 45 ++++++++++++++++++++-------------------------
 1 file changed, 20 insertions(+), 25 deletions(-)

Comments

Vishal Moola Oct. 18, 2023, 11:01 p.m. UTC | #1
On Wed, Oct 18, 2023 at 01:32:13PM -0700, Vishal Moola (Oracle) wrote:
> This removes 2 calls to compound_head() and helps convert khugepaged to
> use folios throughout.
> 
> Previously, if the address passed to collapse_pte_mapped_thp()
> corresponded to a tail page, the scan would fail immediately. Using
> filemap_lock_folio() we can get the corresponding folio back and try to
> operate on the folio instead.
> 
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>

Hi Andrew, I've attached a fix patch addressing the syzbot issue.
It can be squashed into this patch; syzbot has already tested it on v2
as well.

From 34d60af8cb66d6f582c1aeade01689e94e8a0092 Mon Sep 17 00:00:00 2001
From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Date: Wed, 18 Oct 2023 14:24:47 -0700
Subject: [PATCH] collapse_pte_mapped_thp() folio conversion fix

filemap_lock_folio() can return an ERR_PTR on failure.
find_lock_page()/pagecache_get_page() handle this internally, while
filemap_lock_folio() does not. Ensure the caller checks for it after
converting find_lock_page() to filemap_lock_folio().
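
For reference, a sketch of why the two return conventions differ
(simplified from the pagecache_get_page() wrapper; not the exact
mainline body):

	struct page *pagecache_get_page(struct address_space *mapping,
			pgoff_t index, fgf_t fgp_flags, gfp_t gfp)
	{
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
		if (IS_ERR(folio))	/* error pointer converted... */
			return NULL;	/* ...so callers only see NULL */
		return folio_file_page(folio, index);
	}

	/* filemap_lock_folio() hands back __filemap_get_folio()'s
	 * result unmodified, e.g. ERR_PTR(-ENOENT), so its callers
	 * must check IS_ERR() themselves.
	 */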

Reported-and-tested-by: syzbot+1e2648076cadf48ad9a1@syzkaller.appspotmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 mm/khugepaged.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 94c1dd09a8a6..b944ed231792 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1510,7 +1510,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 
 	folio = filemap_lock_folio(vma->vm_file->f_mapping,
 			       linear_page_index(vma, haddr));
-	if (!folio)
+	if (!folio || IS_ERR(folio))
 		return SCAN_PAGE_NULL;
 
 	if (folio_order(folio) != HPAGE_PMD_ORDER) {
Kefeng Wang Oct. 19, 2023, 2:17 a.m. UTC | #2
On 2023/10/19 7:01, Vishal Moola wrote:
> On Wed, Oct 18, 2023 at 01:32:13PM -0700, Vishal Moola (Oracle) wrote:
>> This removes 2 calls to compound_head() and helps convert khugepaged to
>> use folios throughout.
>>
>> Previously, if the address passed to collapse_pte_mapped_thp()
>> corresponded to a tail page, the scan would fail immediately. Using
>> filemap_lock_folio() we can get the corresponding folio back and try to
>> operate on the folio instead.
>>
>> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> 
> Hi Andrew, I've attached a fix patch addressing the syzbot issue.
> It can be squashed into this patch; syzbot has already tested it on v2
> as well.

Hi Vishal, checking IS_ERR(folio) alone is enough, since
filemap_lock_folio() won't return NULL.
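
That is, the check in the fix would reduce to (sketch):

	folio = filemap_lock_folio(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (IS_ERR(folio))
		return SCAN_PAGE_NULL;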

Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index d49aa22d99c9..94c1dd09a8a6 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1475,7 +1475,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	bool notified = false;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
 	struct vm_area_struct *vma = vma_lookup(mm, haddr);
-	struct page *hpage;
+	struct folio *folio;
 	pte_t *start_pte, *pte;
 	pmd_t *pmd, pgt_pmd;
 	spinlock_t *pml = NULL, *ptl;
@@ -1508,19 +1508,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	if (userfaultfd_wp(vma))
 		return SCAN_PTE_UFFD_WP;
 
-	hpage = find_lock_page(vma->vm_file->f_mapping,
+	folio = filemap_lock_folio(vma->vm_file->f_mapping,
 			       linear_page_index(vma, haddr));
-	if (!hpage)
+	if (!folio)
 		return SCAN_PAGE_NULL;
 
-	if (!PageHead(hpage)) {
-		result = SCAN_FAIL;
-		goto drop_hpage;
-	}
-
-	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
+	if (folio_order(folio) != HPAGE_PMD_ORDER) {
 		result = SCAN_PAGE_COMPOUND;
-		goto drop_hpage;
+		goto drop_folio;
 	}
 
 	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
@@ -1534,13 +1529,13 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 		 */
 		goto maybe_install_pmd;
 	default:
-		goto drop_hpage;
+		goto drop_folio;
 	}
 
 	result = SCAN_FAIL;
 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
 	if (!start_pte)		/* mmap_lock + page lock should prevent this */
-		goto drop_hpage;
+		goto drop_folio;
 
 	/* step 1: check all mapped PTEs are to the right huge page */
 	for (i = 0, addr = haddr, pte = start_pte;
@@ -1565,7 +1560,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
 		 * page table, but the new page will not be a subpage of hpage.
 		 */
-		if (hpage + i != page)
+		if (folio_page(folio, i) != page)
 			goto abort;
 	}
 
@@ -1580,7 +1575,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	 * page_table_lock) ptl nests inside pml. The less time we hold pml,
 	 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
 	 * inserts a valid as-if-COWed PTE without even looking up page cache.
-	 * So page lock of hpage does not protect from it, so we must not drop
+	 * So page lock of folio does not protect from it, so we must not drop
 	 * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
 	 */
 	if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
@@ -1604,7 +1599,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 			continue;
 		/*
 		 * We dropped ptl after the first scan, to do the mmu_notifier:
-		 * page lock stops more PTEs of the hpage being faulted in, but
+		 * page lock stops more PTEs of the folio being faulted in, but
 		 * does not stop write faults COWing anon copies from existing
 		 * PTEs; and does not stop those being swapped out or migrated.
 		 */
@@ -1613,7 +1608,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 			goto abort;
 		}
 		page = vm_normal_page(vma, addr, ptent);
-		if (hpage + i != page)
+		if (folio_page(folio, i) != page)
 			goto abort;
 
 		/*
@@ -1632,8 +1627,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 
 	/* step 3: set proper refcount and mm_counters. */
 	if (nr_ptes) {
-		page_ref_sub(hpage, nr_ptes);
-		add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
+		folio_ref_sub(folio, nr_ptes);
+		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
 	}
 
 	/* step 4: remove empty page table */
@@ -1657,14 +1652,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 maybe_install_pmd:
 	/* step 5: install pmd entry */
 	result = install_pmd
-			? set_huge_pmd(vma, haddr, pmd, hpage)
+			? set_huge_pmd(vma, haddr, pmd, &folio->page)
 			: SCAN_SUCCEED;
-	goto drop_hpage;
+	goto drop_folio;
 abort:
 	if (nr_ptes) {
 		flush_tlb_mm(mm);
-		page_ref_sub(hpage, nr_ptes);
-		add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
+		folio_ref_sub(folio, nr_ptes);
+		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
 	}
 	if (start_pte)
 		pte_unmap_unlock(start_pte, ptl);
@@ -1672,9 +1667,9 @@  int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 		spin_unlock(pml);
 	if (notified)
 		mmu_notifier_invalidate_range_end(&range);
-drop_hpage:
-	unlock_page(hpage);
-	put_page(hpage);
+drop_folio:
+	folio_unlock(folio);
+	folio_put(folio);
 	return result;
 }