@@ -2238,18 +2238,13 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			entry = maybe_mkwrite(entry, vma);
 			if (anon_exclusive)
 				SetPageAnonExclusive(page + i);
+			if (!write)
+				entry = pte_wrprotect(entry);
 			if (!young)
 				entry = pte_mkold(entry);
 			/* NOTE: this may set soft-dirty too on some archs */
 			if (dirty)
 				entry = pte_mkdirty(entry);
-			/*
-			 * NOTE: this needs to happen after pte_mkdirty,
-			 * because some archs (sparc64, loongarch) could
-			 * set hw write bit when mkdirty.
-			 */
-			if (!write)
-				entry = pte_wrprotect(entry);
 			if (soft_dirty)
 				entry = pte_mksoft_dirty(entry);
 			if (uffd_wp)
This reverts commit 624a2c94f5b7 ("Partly revert "mm/thp: carry over dirty
bit when thp splits on pmd"") and the fixup in commit e833bc503405
("mm/thp: re-apply mkdirty for small pages after split").

Now that sparc64 mkdirty handling is fixed and no longer sets a PTE/PMD
writable that shouldn't be writable, let's revert the temporary fix and
remove the stale comment. The mkdirty mm selftest still passes with this
change on sparc64.

Note that the loongarch handling was fixed in commit bf2f34a506e6
("LoongArch: Set _PAGE_DIRTY only if _PAGE_WRITE is set in
{pmd,pte}_mkdirty()").

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/huge_memory.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)
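For context, the ordering hazard that the removed comment described can be
illustrated with a small userspace sketch. The bit values and helpers below
(_PAGE_DIRTY, _PAGE_WRITE, _PAGE_HWWRITE, pte_mkdirty_old, pte_mkdirty_fixed)
are illustrative stand-ins, not the real sparc64 or LoongArch definitions;
they only model the idea that mkdirty used to make a PTE hardware-writable
unconditionally, while the fixed variant does so only when the software
write bit is already set, which is why pte_wrprotect() may now run before
pte_mkdirty() again.

/*
 * Illustrative stand-in PTE bits and helpers -- NOT the real sparc64 or
 * LoongArch definitions. They only model why pte_wrprotect() had to run
 * after pte_mkdirty() before the arch fixes, and why the ordering no
 * longer matters afterwards.
 */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_DIRTY	0x1ULL	/* software dirty bit (illustrative) */
#define _PAGE_WRITE	0x2ULL	/* software write permission (illustrative) */
#define _PAGE_HWWRITE	0x4ULL	/* hardware-writable bit (illustrative) */

typedef uint64_t pte_t;

/* Old behaviour: marking a PTE dirty also made it hardware-writable. */
static pte_t pte_mkdirty_old(pte_t pte)
{
	return pte | _PAGE_DIRTY | _PAGE_HWWRITE;
}

/* Fixed behaviour: set the hw write bit only if the PTE is already writable. */
static pte_t pte_mkdirty_fixed(pte_t pte)
{
	pte |= _PAGE_DIRTY;
	if (pte & _PAGE_WRITE)
		pte |= _PAGE_HWWRITE;
	return pte;
}

static pte_t pte_wrprotect(pte_t pte)
{
	return pte & ~(_PAGE_WRITE | _PAGE_HWWRITE);
}

int main(void)
{
	pte_t pte = 0;	/* read-only PTE: neither write bit set */

	/* Pre-fix: wrprotect before mkdirty leaves the PTE hardware-writable. */
	printf("old mkdirty after wrprotect:   %#llx\n",
	       (unsigned long long)pte_mkdirty_old(pte_wrprotect(pte)));

	/*
	 * Post-fix: the same ordering keeps the PTE read-only, so
	 * __split_huge_pmd_locked() can wrprotect before mkdirty again.
	 */
	printf("fixed mkdirty after wrprotect: %#llx\n",
	       (unsigned long long)pte_mkdirty_fixed(pte_wrprotect(pte)));

	return 0;
}

With the old helper the read-only PTE ends up with the hardware write bit
set (0x5), while the fixed helper leaves only the dirty bit (0x1), matching
the behaviour the commit message relies on.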