
[3/7] KVM: arm64: Add save/restore support for FPMR

Message ID 20240708154438.1218186-4-maz@kernel.org (mailing list archive)
State New, archived
Series KVM: arm64: Add support for FP8

Commit Message

Marc Zyngier July 8, 2024, 3:44 p.m. UTC
Just like the rest of the FP/SIMD state, FPMR needs to be context
switched.

The only interesting thing here is that we need to treat the pKVM
part a bit differently, as the host FP state is never written back
to the vcpu thread, but instead stored locally and eagerly restored.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h  | 10 ++++++++++
 arch/arm64/kvm/fpsimd.c            |  1 +
 arch/arm64/kvm/hyp/nvhe/hyp-main.c |  4 ++++
 arch/arm64/kvm/hyp/nvhe/switch.c   | 10 ++++++++++
 arch/arm64/kvm/hyp/vhe/switch.c    |  4 ++++
 5 files changed, 29 insertions(+)
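
For orientation, here is a stripped-down user-space model of the storage
scheme the commit message describes (an illustration only, not kernel code;
the real hunks are in the patch at the bottom of the page): regular KVM saves
the host's FPMR through a pointer into the current thread's storage, while
pKVM keeps a private copy at EL2 and restores it eagerly.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: a user-space model of the fpmr_ptr/fpmr union added
 * to struct kvm_host_data by this patch. Not kernel code.
 */
struct host_data {
	union {
		uint64_t *fpmr_ptr;	/* pointer to the host thread's FPMR storage */
		uint64_t fpmr;		/* pKVM only: private storage for the host value */
	};
};

static uint64_t hw_fpmr;		/* stand-in for the FPMR system register */

static void save_host_fpmr(struct host_data *hd, bool protected_kvm)
{
	if (protected_kvm)
		hd->fpmr = hw_fpmr;		/* keep the host value at EL2, restore eagerly */
	else
		*hd->fpmr_ptr = hw_fpmr;	/* write it back to the host thread, as for FPSIMD */
}

int main(void)
{
	uint64_t thread_fpmr = 0;
	struct host_data hd = { .fpmr_ptr = &thread_fpmr };

	hw_fpmr = 0x2a;

	save_host_fpmr(&hd, false);		/* regular KVM path */
	printf("non-protected: host thread copy = 0x%llx\n",
	       (unsigned long long)thread_fpmr);

	save_host_fpmr(&hd, true);		/* pKVM path: the union doubles as storage */
	printf("protected:     EL2-local copy   = 0x%llx\n",
	       (unsigned long long)hd.fpmr);

	return 0;
}

In the real patch only one of the two union members is ever live on a given
system, selected by is_protected_kvm_enabled().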

Comments

Mark Brown July 8, 2024, 5:34 p.m. UTC | #1
On Mon, Jul 08, 2024 at 04:44:34PM +0100, Marc Zyngier wrote:
> Just like the rest of the FP/SIMD state, FPMR needs to be context
> switched.

> The only interesting thing here is that we need to treat the pKVM
> part a bit differently, as the host FP state is never written back
> to the vcpu thread, but instead stored locally and eagerly restored.

> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/include/asm/kvm_host.h  | 10 ++++++++++
>  arch/arm64/kvm/fpsimd.c            |  1 +
>  arch/arm64/kvm/hyp/nvhe/hyp-main.c |  4 ++++
>  arch/arm64/kvm/hyp/nvhe/switch.c   | 10 ++++++++++
>  arch/arm64/kvm/hyp/vhe/switch.c    |  4 ++++
>  5 files changed, 29 insertions(+)

I'm possibly missing something here but I'm not seeing where we load the
state for the guest, especially in the VHE case.  I would expect to see
a change in kvm_hyp_handle_fpsimd() to load FPMR for guests with the
feature (it needs to be in there to keep in sync with the ownership
tracking for the rest of the FP state, and to avoid loading needlessly
in cases where the guest never touches FP).

Saving for the guest was handled in the previous patch.

> diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
> index 77010b76c150f..a307c1d5ac874 100644
> --- a/arch/arm64/kvm/hyp/vhe/switch.c
> +++ b/arch/arm64/kvm/hyp/vhe/switch.c
> @@ -312,6 +312,10 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
>  static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
>  {
>  	__fpsimd_save_state(*host_data_ptr(fpsimd_state));
> +
> +	if (system_supports_fpmr() &&
> +	    kvm_has_feat(vcpu->kvm, ID_AA64PFR2_EL1, FPMR, IMP))
> +		**host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
>  }

That's only saving the host state, it doesn't load the guest state.
Marc Zyngier July 8, 2024, 5:47 p.m. UTC | #2
On Mon, 08 Jul 2024 18:34:36 +0100,
Mark Brown <broonie@kernel.org> wrote:
> 
> I'm possibly missing something here but I'm not seeing where we load the
> state for the guest, especially in the VHE case.
> 
> [...]
> 
> That's only saving the host state, it doesn't load the guest state.

Ah, I forgot to cherry-pick the fixes. Fsck knows what else I forgot.
Thanks for reminding me.

	M.

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index f59ccfe11ab9a..52c7dc8446f16 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -404,6 +404,10 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	else
 		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
 
+	if (system_supports_fpmr() &&
+	    kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64PFR2_EL1, FPMR, IMP))
+		write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);
+
 	/* Skip restoring fpexc32 for AArch64 guests */
 	if (!(read_sysreg(hcr_el2) & HCR_RW))
 		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
Marc Zyngier July 8, 2024, 5:53 p.m. UTC | #3
On Mon, 08 Jul 2024 18:47:45 +0100,
Marc Zyngier <maz@kernel.org> wrote:
> 
> [...]
> 
> Ah, I forgot to cherry-pick the fixes. Fsck knows what else I forgot.
> Thanks for reminding me.
> 
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> [...]

And thinking of it, that's not enough. I'm missing the pKVM angle that
needs to be special cased. Drat.

	M.
Marc Zyngier July 9, 2024, 9:06 a.m. UTC | #4
On Mon, 08 Jul 2024 18:53:39 +0100,
Marc Zyngier <maz@kernel.org> wrote:
> 
> [...]
> > > That's only saving the host state, it doesn't load the guest
> > > state.

So after cherry-picking my own fixes and realising that I had left
pKVM in the lurch, this is what I have added to this patch, which
hopefully does the right thing.

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index f59ccfe11ab9a..52c7dc8446f16 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -404,6 +404,10 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	else
 		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
 
+	if (system_supports_fpmr() &&
+	    kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64PFR2_EL1, FPMR, IMP))
+		write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);
+
 	/* Skip restoring fpexc32 for AArch64 guests */
 	if (!(read_sysreg(hcr_el2) & HCR_RW))
 		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 6b14a2c13e287..97fc6ae123a03 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -62,6 +62,8 @@ static void fpsimd_sve_flush(void)
 
 static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
 {
+	bool has_fpmr;
+
 	if (!guest_owns_fp_regs())
 		return;
 
@@ -73,13 +75,17 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
 	else
 		__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
 
+	has_fpmr = (system_supports_fpmr() &&
+		    kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64PFR2_EL1, FPMR, IMP));
+	if (has_fpmr)
+		__vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
+
 	if (system_supports_sve())
 		__hyp_sve_restore_host();
 	else
 		__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
 
-	if (system_supports_fpmr() &&
-	    kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64PFR2_EL1, FPMR, IMP))
+	if (has_fpmr)
 		write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
 
 	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;

I'll repost the series after -rc1.

	M.
Mark Brown July 9, 2024, 3:43 p.m. UTC | #5
On Tue, Jul 09, 2024 at 10:06:06AM +0100, Marc Zyngier wrote:

> So after cherry-picking my own fixes and realising that I had left
> pKVM in the lurch, this is what I have added to this patch, which
> hopefully does the right thing.

This looks good to me from review. Unfortunately I've got IT issues at the
moment which mean I can't test anything on a model with FPMR support;
whenever those get resolved I'll try to take this for a spin.
Mark Brown July 12, 2024, 3:28 p.m. UTC | #6
On Tue, Jul 09, 2024 at 10:06:06AM +0100, Marc Zyngier wrote:

> So after cherry-picking my own fixes and realising that I had left
> pKVM in the lurch, this is what I have added to this patch, which
> hopefully does the right thing.

My IT issues were resolved, so I've kicked the tires on this and it all
seems to be working. I'll go through it again a bit more thoroughly when you
repost, but I'd be surprised if there were any issues.

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a14c18e8b173a..764d23082eb91 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -599,6 +599,16 @@  struct kvm_host_data {
 		struct cpu_sve_state *sve_state;
 	};
 
+	union {
+		/* HYP VA pointer to the host storage for FPMR */
+		u64	*fpmr_ptr;
+		/*
+		 * Used by pKVM only, as it needs to provide storage
+		 * for the host
+		 */
+		u64	fpmr;
+	};
+
 	/* Ownership of the FP regs */
 	enum {
 		FP_STATE_FREE,
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 4cb8ad5d69a80..ea5484ce1f3ba 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -63,6 +63,7 @@  void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 	 */
 	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
 	*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
+	*host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
 
 	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
 	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f43d845f3c4ec..6b14a2c13e287 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -78,6 +78,10 @@  static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
 	else
 		__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
 
+	if (system_supports_fpmr() &&
+	    kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64PFR2_EL1, FPMR, IMP))
+		write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
+
 	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
 }
 
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6af179c6356d6..47d24ecd68fec 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -198,6 +198,16 @@  static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 	} else {
 		__fpsimd_save_state(*host_data_ptr(fpsimd_state));
 	}
+
+	if (system_supports_fpmr() &&
+	    kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64PFR2_EL1, FPMR, IMP)) {
+		u64 fpmr = read_sysreg_s(SYS_FPMR);
+
+		if (unlikely(is_protected_kvm_enabled()))
+			*host_data_ptr(fpmr) = fpmr;
+		else
+			**host_data_ptr(fpmr_ptr) = fpmr;
+	}
 }
 
 static const exit_handler_fn hyp_exit_handlers[] = {
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 77010b76c150f..a307c1d5ac874 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -312,6 +312,10 @@  static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
 static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 {
 	__fpsimd_save_state(*host_data_ptr(fpsimd_state));
+
+	if (system_supports_fpmr() &&
+	    kvm_has_feat(vcpu->kvm, ID_AA64PFR2_EL1, FPMR, IMP))
+		**host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
 }
 
 static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)