@@ -1230,6 +1230,32 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
+ /*
+ * Some processors can start a write, but end up seeing a read-only
+ * PTE by the time they get to the Dirty bit. In this case, they
+ * will set the Dirty bit, leaving a read-only, Dirty PTE which
+ * looks like a shadow stack PTE.
+ *
+ * However, processors supporting shadow stack are guaranteed not to
+ * set the Dirty bit on a read-only PTE, so this cannot happen here.
+ * Without that guarantee, a transition to a non-present PTE and a
+ * TLB flush would be needed.
+ *
+ * When changing a writable PTE to read-only, if the PTE has
+ * _PAGE_DIRTY_HW set, move that bit to _PAGE_COW so that the
+ * resulting PTE is not a shadow stack PTE.
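+ *
+ * For example, a PTE with _PAGE_RW and _PAGE_DIRTY_HW set becomes
+ * read-only with _PAGE_COW set and _PAGE_DIRTY_HW clear, rather
+ * than the read-only, Dirty combination that denotes shadow stack.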
+ */
+ if (cpu_feature_enabled(X86_FEATURE_SHSTK)) {
+ pte_t old_pte, new_pte;
+
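+ /*
+ * Use try_cmpxchg() so that, if the processor sets the Dirty bit
+ * between the read and the write, the update fails and the
+ * transition is retried with the fresh PTE value instead of the
+ * hardware Dirty bit being lost.
+ */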
+ do {
+ old_pte = READ_ONCE(*ptep);
+ new_pte = pte_wrprotect(old_pte);
+ } while (!try_cmpxchg(&ptep->pte, &old_pte.pte, new_pte.pte));
+
+ return;
+ }
clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}
@@ -1286,6 +1312,32 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
+ /*
+ * Some processors can start a write, but end up seeing a read-only
+ * PMD by the time they get to the Dirty bit. In this case, they
+ * will set the Dirty bit, leaving a read-only, Dirty PMD which
+ * looks like a shadow stack PMD.
+ *
+ * However, processors supporting shadow stack are guaranteed not to
+ * set the Dirty bit on a read-only PMD, so this cannot happen here.
+ * Without that guarantee, a transition to a non-present PMD and a
+ * TLB flush would be needed.
+ *
+ * When changing a writable PMD to read-only, if the PMD has
+ * _PAGE_DIRTY_HW set, move that bit to _PAGE_COW so that the
+ * resulting PMD is not a shadow stack PMD.
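+ *
+ * For example, a PMD with _PAGE_RW and _PAGE_DIRTY_HW set becomes
+ * read-only with _PAGE_COW set and _PAGE_DIRTY_HW clear, rather
+ * than the read-only, Dirty combination that denotes shadow stack.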
+ */
+ if (cpu_feature_enabled(X86_FEATURE_SHSTK)) {
+ pmd_t old_pmd, new_pmd;
+
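+ /*
+ * Use try_cmpxchg() so that, if the processor sets the Dirty bit
+ * between the read and the write, the update fails and the
+ * transition is retried with the fresh PMD value instead of the
+ * hardware Dirty bit being lost.
+ */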
+ do {
+ old_pmd = READ_ONCE(*pmdp);
+ new_pmd = pmd_wrprotect(old_pmd);
+ } while (!try_cmpxchg((pmdval_t *)pmdp, (pmdval_t *)&old_pmd, pmd_val(new_pmd)));
+
+ return;
+ }
clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}