[v4,26/30] x86,tlb: Make __flush_tlb_global() noinstr-compliant

Message ID 20250114175143.81438-27-vschneid@redhat.com (mailing list archive)
State New
Series context_tracking,x86: Defer some IPIs until a user->kernel transition

Commit Message

Valentin Schneider Jan. 14, 2025, 5:51 p.m. UTC
From: Peter Zijlstra <peterz@infradead.org>

Later patches will require issuing a __flush_tlb_all() from noinstr code.
This requires making both __flush_tlb_local() and __flush_tlb_global()
noinstr-compliant.

For __flush_tlb_global(), both native_flush_tlb_global() and xen_flush_tlb()
need to be made noinstr.

Forgo using __native_flush_tlb_global() / native_write_cr4() and instead
inline the asm directly in the native function. For the Xen side,
__always_inline a handful of helpers.
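
For reference, the core of the native global flush is a CR4.PGE toggle.
A condensed sketch of the sequence this patch inlines (same logic as the
tlb.c hunk below, shown here for readability):

	unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

	/*
	 * Toggling CR4.PGE off and back on flushes the entire TLB,
	 * global entries included; IRQs must be off around this so
	 * nothing observes the intermediate CR4 value.
	 */
	asm volatile("mov %0,%%cr4" : : "r" (cr4 ^ X86_CR4_PGE) : "memory");
	asm volatile("mov %0,%%cr4" : : "r" (cr4) : "memory");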

Not-signed-off-by: Peter Zijlstra <peterz@infradead.org>
[Changelog faff]
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 arch/x86/include/asm/invpcid.h       | 13 ++++++-------
 arch/x86/include/asm/paravirt.h      |  2 +-
 arch/x86/include/asm/xen/hypercall.h | 11 +++++++++--
 arch/x86/mm/tlb.c                    | 15 +++++++++++----
 arch/x86/xen/mmu_pv.c                | 10 +++++-----
 arch/x86/xen/xen-ops.h               | 12 ++++++++----
 6 files changed, 40 insertions(+), 23 deletions(-)

Comments

Dave Hansen Jan. 14, 2025, 9:45 p.m. UTC | #1
On 1/14/25 09:51, Valentin Schneider wrote:
> +	cr4 = this_cpu_read(cpu_tlbstate.cr4);
> +	asm volatile("mov %0,%%cr4": : "r" (cr4 ^ X86_CR4_PGE) : "memory");
> +	asm volatile("mov %0,%%cr4": : "r" (cr4) : "memory");
> +	/*
> +	 * In lieu of the pinning crap, hard fail if CR4 doesn't match
> +	 * the expected value. This ensures that anybody doing something
> +	 * dodgy gets caught by the fallthrough check.
> +	 */
> +	BUG_ON(cr4 != this_cpu_read(cpu_tlbstate.cr4));

Let's say someone managed to write to cpu_tlbstate.cr4 and cleared one
of the pinned bits.

Before this patch, CR4 pinning would WARN_ONCE() about it pretty quickly
and also reset the cleared bits.

After this patch, the first native_flush_tlb_global() can clear pinned
bits, at least until native_write_cr4() gets called the next time. That
seems like it'll undermine CR4 pinning at least somewhat.
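
Concretely, the scenario is something like this (illustrative only;
X86_CR4_SMEP stands in for any pinned bit):

	/* Dodgy write to the cached value; no CR4 pinning involved: */
	this_cpu_and(cpu_tlbstate.cr4, ~X86_CR4_SMEP);
	/* ...and the next global flush reloads CR4 without SMEP: */
	native_flush_tlb_global();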

What keeps native_write_cr4() from being noinstr-compliant now? Is it
just the WARN_ONCE()?

If so, I'd kinda rather have a native_write_cr4_nowarn() that's
noinstr-compliant but retains all the other CR4 pinning behavior. Would
something like the attached patch be _worse_?
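
If it really is just the WARN_ONCE(), a hypothetical sketch of such a
helper (modeled on the existing native_write_cr4() pinning logic; the
name and the visibility of cr_pinning / cr4_pinned_mask /
cr4_pinned_bits here are assumptions, and this is not the attached
patch):

	static __always_inline void native_write_cr4_nowarn(unsigned long val)
	{
		/* Silently force the pinned bits back on before the write. */
		if (static_branch_likely(&cr_pinning))
			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;

		asm volatile("mov %0,%%cr4" : : "r" (val) : "memory");
	}

The WARN_ONCE() would then remain only in the instrumentable
native_write_cr4() path.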

Patch

diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h
index 734482afbf81d..ff26136fcd9c6 100644
--- a/arch/x86/include/asm/invpcid.h
+++ b/arch/x86/include/asm/invpcid.h
@@ -2,7 +2,7 @@ 
 #ifndef _ASM_X86_INVPCID
 #define _ASM_X86_INVPCID
 
-static inline void __invpcid(unsigned long pcid, unsigned long addr,
+static __always_inline void __invpcid(unsigned long pcid, unsigned long addr,
 			     unsigned long type)
 {
 	struct { u64 d[2]; } desc = { { pcid, addr } };
@@ -13,7 +13,7 @@ static inline void __invpcid(unsigned long pcid, unsigned long addr,
 	 * mappings, we don't want the compiler to reorder any subsequent
 	 * memory accesses before the TLB flush.
 	 */
-	asm volatile("invpcid %[desc], %[type]"
+	asm_inline volatile("invpcid %[desc], %[type]"
 		     :: [desc] "m" (desc), [type] "r" (type) : "memory");
 }
 
@@ -23,26 +23,25 @@ static inline void __invpcid(unsigned long pcid, unsigned long addr,
 #define INVPCID_TYPE_ALL_NON_GLOBAL	3
 
 /* Flush all mappings for a given pcid and addr, not including globals. */
-static inline void invpcid_flush_one(unsigned long pcid,
-				     unsigned long addr)
+static __always_inline void invpcid_flush_one(unsigned long pcid, unsigned long addr)
 {
 	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
 }
 
 /* Flush all mappings for a given PCID, not including globals. */
-static inline void invpcid_flush_single_context(unsigned long pcid)
+static __always_inline void invpcid_flush_single_context(unsigned long pcid)
 {
 	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
 }
 
 /* Flush all mappings, including globals, for all PCIDs. */
-static inline void invpcid_flush_all(void)
+static __always_inline void invpcid_flush_all(void)
 {
 	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
 }
 
 /* Flush all mappings for all PCIDs except globals. */
-static inline void invpcid_flush_all_nonglobals(void)
+static __always_inline void invpcid_flush_all_nonglobals(void)
 {
 	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
 }
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index d4eb9e1d61b8e..b3daee3d46677 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -75,7 +75,7 @@ static inline void __flush_tlb_local(void)
 	PVOP_VCALL0(mmu.flush_tlb_user);
 }
 
-static inline void __flush_tlb_global(void)
+static __always_inline void __flush_tlb_global(void)
 {
 	PVOP_VCALL0(mmu.flush_tlb_kernel);
 }
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 97771b9d33af3..291e9f8006f62 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -365,8 +365,8 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
 	trace_xen_mc_entry(mcl, 4);
 }
 
-static inline void
-MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
+static __always_inline void
+__MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
 		int *success_count, domid_t domid)
 {
 	mcl->op = __HYPERVISOR_mmuext_op;
@@ -374,6 +374,13 @@ MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
 	mcl->args[1] = count;
 	mcl->args[2] = (unsigned long)success_count;
 	mcl->args[3] = domid;
+}
+
+static inline void
+MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
+		int *success_count, domid_t domid)
+{
+	__MULTI_mmuext_op(mcl, op, count, success_count, domid);
 
 	trace_xen_mc_entry(mcl, 4);
 }
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a2becb85bea79..2d2ab3e221f0c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1169,9 +1169,10 @@ void flush_tlb_one_user(unsigned long addr)
 /*
  * Flush everything
  */
-STATIC_NOPV void native_flush_tlb_global(void)
+STATIC_NOPV noinstr void native_flush_tlb_global(void)
 {
 	unsigned long flags;
+	unsigned long cr4;
 
 	if (static_cpu_has(X86_FEATURE_INVPCID)) {
 		/*
@@ -1190,9 +1191,15 @@ STATIC_NOPV void native_flush_tlb_global(void)
 	 * be called from deep inside debugging code.)
 	 */
 	raw_local_irq_save(flags);
-
-	__native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));
-
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	asm volatile("mov %0,%%cr4": : "r" (cr4 ^ X86_CR4_PGE) : "memory");
+	asm volatile("mov %0,%%cr4": : "r" (cr4) : "memory");
+	/*
+	 * In lieu of the pinning crap, hard fail if CR4 doesn't match
+	 * the expected value. This ensures that anybody doing something
+	 * dodgy gets caught by the fallthrough check.
+	 */
+	BUG_ON(cr4 != this_cpu_read(cpu_tlbstate.cr4));
 	raw_local_irq_restore(flags);
 }
 
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 55a4996d0c04f..4eb265eb867af 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1231,22 +1231,22 @@ static noinstr void xen_write_cr2(unsigned long cr2)
 	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
 }
 
-static noinline void xen_flush_tlb(void)
+static noinline noinstr void xen_flush_tlb(void)
 {
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
-	preempt_disable();
+	preempt_disable_notrace();
 
 	mcs = xen_mc_entry(sizeof(*op));
 
 	op = mcs.args;
 	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
-	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+	__MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(XEN_LAZY_MMU);
+	__xen_mc_issue(XEN_LAZY_MMU);
 
-	preempt_enable();
+	preempt_enable_notrace();
 }
 
 static void xen_flush_tlb_one_user(unsigned long addr)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 63c13a2ccf556..effb1a54afbd1 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -235,15 +235,19 @@ static inline struct multicall_space xen_mc_entry(size_t args)
 void xen_mc_flush(void);
 
 /* Issue a multicall if we're not in a lazy mode */
-static inline void xen_mc_issue(unsigned mode)
+static __always_inline void __xen_mc_issue(unsigned mode)
 {
-	trace_xen_mc_issue(mode);
-
 	if ((xen_get_lazy_mode() & mode) == 0)
 		xen_mc_flush();
 
 	/* restore flags saved in xen_mc_batch */
-	local_irq_restore(this_cpu_read(xen_mc_irq_flags));
+	raw_local_irq_restore(this_cpu_read(xen_mc_irq_flags));
+}
+
+static inline void xen_mc_issue(unsigned mode)
+{
+	trace_xen_mc_issue(mode);
+	__xen_mc_issue(mode);
 }
 
 /* Set up a callback to be called when the current batch is flushed */