
[v3,2/5] KVM: arm64: nvhe: Synchronise with page table walker on TLBI

Message ID 20230413081441.165134-3-maz@kernel.org
State New, archived
Series KVM: arm64: Synchronise speculative page table walks on translation regime change

Commit Message

Marc Zyngier April 13, 2023, 8:14 a.m. UTC
A TLBI from EL2 impacting EL1 involves messing with the EL1&0
translation regime, and the page table walker may still be
performing speculative walks.

Piggyback on the existing DSBs to always have a DSB ISH that
will synchronise any load/store operations that the PTW may
still have in flight.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/hyp/nvhe/tlb.c | 38 ++++++++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 9 deletions(-)
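
Note that every caller converted in this patch passes "nsh = false",
keeping the Inner-Shareable barrier those paths already relied on. The
new parameter only pays off once a purely CPU-local invalidation path
exists. As a rough sketch, and purely as an assumption (the function
name and the non-broadcast TLBI choice below are illustrative, not
part of this patch), such a caller could look like:

void __kvm_tlb_flush_vmid_ipa_local(struct kvm_s2_mmu *mmu,
				    phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Local-only invalidation: dsb(nsh) is enough to fence the PTW */
	__tlb_switch_to_guest(mmu, &cxt, true);

	/* Non-broadcast stage-2 invalidation by IPA */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);

	/* Complete the TLBI on this PE only, then drop stage-1 entries */
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}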

Comments

Oliver Upton April 13, 2023, 3:53 p.m. UTC | #1
On Thu, Apr 13, 2023 at 09:14:38AM +0100, Marc Zyngier wrote:
> A TLBI from EL2 impacting EL1 involves messing with the EL1&0
> translation regime, and the page table walker may still be
> performing speculative walks.
> 
> Piggyback on the existing DSBs to always have a DSB ISH that
> will synchronise any load/store operations that the PTW may
> still have in flight.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>

> [...]
> +	/*
> +	 * We have two requirements:
> +	 *
> +	 * - ensure that the page table updates are visible to all
> +         *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
> +         *   being either ish or nsh, depending on the invalidation
> +         *   type.
> +	 *
> +	 * - complete any speculative page table walk started before
> +         *   we trapped to EL2 so that we can mess with the MM
> +         *   registers out of context, for which dsb(nsh) is enough

Looks like a few of these lines are indented with spaces, not tabs. Mind
fixing this when you apply the patches?
Marc Zyngier April 14, 2023, 7:24 a.m. UTC | #2
On Thu, 13 Apr 2023 16:53:04 +0100,
Oliver Upton <oliver.upton@linux.dev> wrote:
> 
> On Thu, Apr 13, 2023 at 09:14:38AM +0100, Marc Zyngier wrote:
> > A TLBI from EL2 impacting EL1 involves messing with the EL1&0
> > translation regime, and the page table walker may still be
> > performing speculative walks.
> > 
> > Piggyback on the existing DSBs to always have a DSB ISH that
> > will synchronise any load/store operations that the PTW may
> > still have in flight.
> > 
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> 
> Reviewed-by: Oliver Upton <oliver.upton@linux.dev>

Thanks!

> 
> > [...]
> > +	/*
> > +	 * We have two requirements:
> > +	 *
> > +	 * - ensure that the page table updates are visible to all
> > +         *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
> > +         *   being either ish or nsh, depending on the invalidation
> > +         *   type.
> > +	 *
> > +	 * - complete any speculative page table walk started before
> > +         *   we trapped to EL2 so that we can mess with the MM
> > +         *   registers out of context, for which dsb(nsh) is enough
> 
> Looks like a few of these lines are indented with spaces, not tabs. Mind
> fixing this when you apply the patches?

Ah, well spotted. Now fixed.

Cheers,

	M.
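
For readers tracing the barrier argument in the comment above: the
kernel's arm64 dsb() macro expands directly to the corresponding
instruction, so the nsh/ish choice in __tlb_switch_to_guest() maps
one-to-one onto the DSB's shareability domain. A simplified sketch of
the definition from arch/arm64/include/asm/barrier.h:

#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

/*
 * dsb(nsh): "dsb nsh" waits for completion on this PE only, which is
 *           enough to drain the local walker's speculative accesses.
 * dsb(ish): "dsb ish" additionally makes prior page table updates
 *           visible to all observers in the Inner Shareable domain,
 *           i.e. every CPU (and its page table walker) in the system.
 */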

Patch

diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index d296d617f589..1da2fc35f94e 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -15,8 +15,31 @@ struct tlb_inv_context {
 };
 
 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-				  struct tlb_inv_context *cxt)
+				  struct tlb_inv_context *cxt,
+				  bool nsh)
 {
+	/*
+	 * We have two requirements:
+	 *
+	 * - ensure that the page table updates are visible to all
+         *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
+         *   being either ish or nsh, depending on the invalidation
+         *   type.
+	 *
+	 * - complete any speculative page table walk started before
+         *   we trapped to EL2 so that we can mess with the MM
+         *   registers out of context, for which dsb(nsh) is enough
+	 *
+	 * The composition of these two barriers is a dsb(DOMAIN), and
+	 * the 'nsh' parameter tracks the distinction between
+	 * Inner-Shareable and Non-Shareable, as specified by the
+	 * callers.
+	 */
+	if (nsh)
+		dsb(nsh);
+	else
+		dsb(ish);
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
 
@@ -60,10 +83,8 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 {
 	struct tlb_inv_context cxt;
 
-	dsb(ishst);
-
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	__tlb_switch_to_guest(mmu, &cxt, false);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -113,10 +134,8 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;
 
-	dsb(ishst);
-
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	__tlb_switch_to_guest(mmu, &cxt, false);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
@@ -130,7 +149,7 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	__tlb_switch_to_guest(mmu, &cxt, false);
 
 	__tlbi(vmalle1);
 	asm volatile("ic iallu");
@@ -142,7 +161,8 @@ void __kvm_flush_vm_context(void)
 
 void __kvm_flush_vm_context(void)
 {
-	dsb(ishst);
+	/* Same remark as in __tlb_switch_to_guest() */
+	dsb(ish);
 	__tlbi(alle1is);
 
 	/*