
[01/10] KVM: arm64: Provide KVM's own save/restore SVE primitives

Message ID: 20210316101312.102925-2-maz@kernel.org
State: New, archived
Series: KVM: arm64: Enable SVE support on nVHE systems

Commit Message

Marc Zyngier March 16, 2021, 10:13 a.m. UTC
As we are about to change the way KVM deals with SVE, provide
KVM with its own save/restore SVE primitives.

No functional change intended.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/fpsimdmacros.h   |  2 ++
 arch/arm64/include/asm/kvm_hyp.h        |  2 ++
 arch/arm64/kvm/hyp/fpsimd.S             | 10 ++++++++++
 arch/arm64/kvm/hyp/include/hyp/switch.h | 10 +++++-----
 4 files changed, 19 insertions(+), 5 deletions(-)
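
For reference, the new hyp-private primitives and their call sites boil
down to the following sketch, condensed from the hunks below (the trap
handling and the FPSIMD-only fallback paths are elided, and the wrapper
function is hypothetical, for illustration only):

void __sve_save_state(void *sve_pffr, u32 *fpsr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);

/* Hypothetical wrapper, not part of the patch. */
static inline void __sve_switch_sketch(struct kvm_vcpu *vcpu)
{
	struct thread_struct *thread = container_of(
			vcpu->arch.host_fpsimd_state,
			struct thread_struct, uw.fpsimd_state);

	/* Save the host's SVE state through the hyp-private helper. */
	__sve_save_state(sve_pffr(thread),
			 &vcpu->arch.host_fpsimd_state->fpsr);

	/* Restore the guest's SVE state; the last argument is VQ - 1. */
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr,
			    sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
	write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
}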

Comments

Quentin Perret March 16, 2021, 10:31 a.m. UTC | #1
On Tuesday 16 Mar 2021 at 10:13:03 (+0000), Marc Zyngier wrote:
> diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
> index 01f114aa47b0..e4010d1acb79 100644
> --- a/arch/arm64/kvm/hyp/fpsimd.S
> +++ b/arch/arm64/kvm/hyp/fpsimd.S
> @@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
>  	fpsimd_restore	x0, 1
>  	ret
>  SYM_FUNC_END(__fpsimd_restore_state)
> +
> +SYM_FUNC_START(__sve_restore_state)
> +	sve_load 0, x1, x2, 3, x4
> +	ret
> +SYM_FUNC_END(__sve_restore_state)

Nit: maybe this could be named __sve_load_state() for consistency with
the EL1 version?

> +SYM_FUNC_START(__sve_save_state)
> +	sve_save 0, x1, 2
> +	ret
> +SYM_FUNC_END(__sve_restore_state)

SYM_FUNC_END(__sve_save_state) here?
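
i.e., presumably, with the closing macro matching the opening one:

SYM_FUNC_START(__sve_save_state)
	sve_save 0, x1, 2
	ret
SYM_FUNC_END(__sve_save_state)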

Thanks,
Quentin
Marc Zyngier March 16, 2021, 12:17 p.m. UTC | #2
On Tue, 16 Mar 2021 10:31:46 +0000,
Quentin Perret <qperret@google.com> wrote:
> 
> On Tuesday 16 Mar 2021 at 10:13:03 (+0000), Marc Zyngier wrote:
> > diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
> > index 01f114aa47b0..e4010d1acb79 100644
> > --- a/arch/arm64/kvm/hyp/fpsimd.S
> > +++ b/arch/arm64/kvm/hyp/fpsimd.S
> > @@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
> >  	fpsimd_restore	x0, 1
> >  	ret
> >  SYM_FUNC_END(__fpsimd_restore_state)
> > +
> > +SYM_FUNC_START(__sve_restore_state)
> > +	sve_load 0, x1, x2, 3, x4
> > +	ret
> > +SYM_FUNC_END(__sve_restore_state)
> 
> Nit: maybe this could be named __sve_load_state() for consistency with
> the EL1 version?

Well, we already have the discrepancy for fpsimd in the same file, so
I opted for another kind of consistency...

> 
> > +SYM_FUNC_START(__sve_save_state)
> > +	sve_save 0, x1, 2
> > +	ret
> > +SYM_FUNC_END(__sve_restore_state)
> 
> SYM_FUNC_END(__sve_save_state) here?

Yup, good catch.

Thanks,

	M.
Will Deacon March 17, 2021, 2:30 p.m. UTC | #3
On Tue, Mar 16, 2021 at 10:13:03AM +0000, Marc Zyngier wrote:
> As we are about to change the way KVM deals with SVE, provide
> KVM with its own save/restore SVE primitives.
> 
> No functional change intended.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/include/asm/fpsimdmacros.h   |  2 ++
>  arch/arm64/include/asm/kvm_hyp.h        |  2 ++
>  arch/arm64/kvm/hyp/fpsimd.S             | 10 ++++++++++
>  arch/arm64/kvm/hyp/include/hyp/switch.h | 10 +++++-----
>  4 files changed, 19 insertions(+), 5 deletions(-)

With the typo spotted by Quentin fixed:

Acked-by: Will Deacon <will@kernel.org>

Will

Patch

diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index af43367534c7..e9b72d35b867 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -6,6 +6,8 @@
  * Author: Catalin Marinas <catalin.marinas@arm.com>
  */
 
+#include <asm/assembler.h>
+
 .macro fpsimd_save state, tmpnr
 	stp	q0, q1, [\state, #16 * 0]
 	stp	q2, q3, [\state, #16 * 2]
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index c0450828378b..e8b0f7fcd86b 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -85,6 +85,8 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
 
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+void __sve_save_state(void *sve_pffr, u32 *fpsr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);
 
 #ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
index 01f114aa47b0..e4010d1acb79 100644
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
 	fpsimd_restore	x0, 1
 	ret
 SYM_FUNC_END(__fpsimd_restore_state)
+
+SYM_FUNC_START(__sve_restore_state)
+	sve_load 0, x1, x2, 3, x4
+	ret
+SYM_FUNC_END(__sve_restore_state)
+
+SYM_FUNC_START(__sve_save_state)
+	sve_save 0, x1, 2
+	ret
+SYM_FUNC_END(__sve_restore_state)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 54f4860cd87c..807bc4734828 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -256,8 +256,8 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 				vcpu->arch.host_fpsimd_state,
 				struct thread_struct, uw.fpsimd_state);
 
-			sve_save_state(sve_pffr(thread),
-				       &vcpu->arch.host_fpsimd_state->fpsr);
+			__sve_save_state(sve_pffr(thread),
+					 &vcpu->arch.host_fpsimd_state->fpsr);
 		} else {
 			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
 		}
@@ -266,9 +266,9 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 	}
 
 	if (sve_guest) {
-		sve_load_state(vcpu_sve_pffr(vcpu),
-			       &vcpu->arch.ctxt.fp_regs.fpsr,
-			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
+		__sve_restore_state(vcpu_sve_pffr(vcpu),
+				    &vcpu->arch.ctxt.fp_regs.fpsr,
+				    sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
 		write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
 	} else {
 		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);