@@ -1816,12 +1816,17 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+	bool shstk = arch_shadow_stack_mapping(vma->vm_flags);
 
 	ptl = __pmd_trans_huge_lock(pmd, vma);
 	if (!ptl)
 		return 0;
 
-	preserve_write = prot_numa && pmd_write(*pmd);
+	/*
+	 * Preserve only normal writable huge PMD, but not shadow
+	 * stack (RW=0, Dirty=1).
+	 */
+	preserve_write = prot_numa && pmd_write(*pmd) && !shstk;
 	ret = 1;
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
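For reference, arch_shadow_stack_mapping() is introduced by an earlier
patch in this series, not here.  A minimal sketch of its expected shape,
assuming the series' VM_SHSTK vm_flag (the helper body and flag name are
assumptions taken from the wider series, not from this patch):

	/* Sketch only: helper and flag names are assumed. */
	static inline bool arch_shadow_stack_mapping(vm_flags_t vm_flags)
	{
		/* True only for a VMA backing a shadow stack. */
		return vm_flags & VM_SHSTK;
	}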
@@ -75,7 +75,14 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		oldpte = *pte;
 		if (pte_present(oldpte)) {
 			pte_t ptent;
-			bool preserve_write = prot_numa && pte_write(oldpte);
+			bool shstk = arch_shadow_stack_mapping(vma->vm_flags);
+			bool preserve_write;
+
+			/*
+			 * Preserve only normal writable PTE, but not shadow
+			 * stack (RW=0, Dirty=1).
+			 */
+			preserve_write = prot_numa && pte_write(oldpte) && !shstk;
 
 			/*
 			 * Avoid trapping faults against the zero or KSM
In change_pte_range(), when a PTE is changed for prot_numa, _PAGE_RW is
preserved to avoid the additional write fault after the NUMA hinting
fault.  However, pte_write() now includes both normal writable and
shadow stack (RW=0, Dirty=1) PTEs, and the latter does not have
_PAGE_RW to preserve.

Exclude shadow stack from the preserve_write test, and apply the same
change to change_huge_pmd().

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
---
 mm/huge_memory.c | 7 ++++++-
 mm/mprotect.c    | 9 ++++++++-
 2 files changed, 14 insertions(+), 2 deletions(-)
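To make the RW=0, Dirty=1 encoding concrete: with CET, the CPU treats a
Write=0, Dirty=1 PTE as shadow stack memory, which it may write
implicitly (e.g. CALL pushing a return address) but which must fault on
an ordinary store.  Below is an illustrative sketch of the two
predicates involved, using the upstream _PAGE_RW/_PAGE_DIRTY names; the
series itself may spell these differently, and it takes separate steps
elsewhere to keep ordinary read-only pages from carrying the hardware
dirty bit:

	/* Sketch only, not the series' code. */
	static inline bool pte_is_shstk(pte_t pte)
	{
		/* Shadow stack encoding: hardware-dirty but not writable. */
		return (pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) ==
		       _PAGE_DIRTY;
	}

	static inline bool pte_write_sketch(pte_t pte)
	{
		/* "Writable" now covers both encodings. */
		return (pte_flags(pte) & _PAGE_RW) || pte_is_shstk(pte);
	}

Preserving write on such a PTE would presumably end up setting _PAGE_RW
later (RW=1, Dirty=1), turning the shadow stack page into an ordinary
writable page; the !shstk test avoids that.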