diff mbox series

[v2,05/15] KVM: arm64: nv: Load guest hyp's ZCR into EL1 state

Message ID 20240613201756.3258227-6-oliver.upton@linux.dev (mailing list archive)
State New, archived
Headers show
Series KVM: arm64: nv: FPSIMD/SVE, plus some other CPTR goodies | expand

Commit Message

Oliver Upton June 13, 2024, 8:17 p.m. UTC
Load the guest hypervisor's ZCR_EL2 into the corresponding EL1 register
when restoring SVE state, as ZCR_EL2 affects the VL in the hypervisor
context.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
 arch/arm64/include/asm/kvm_host.h       | 4 ++++
 arch/arm64/kvm/hyp/include/hyp/switch.h | 3 ++-
 2 files changed, 6 insertions(+), 1 deletion(-)

Comments

Marc Zyngier June 14, 2024, 11:14 a.m. UTC | #1
On Thu, 13 Jun 2024 21:17:46 +0100,
Oliver Upton <oliver.upton@linux.dev> wrote:
> 
> Load the guest hypervisor's ZCR_EL2 into the corresponding EL1 register
> when restoring SVE state, as ZCR_EL2 affects the VL in the hypervisor
> context.
> 
> Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> ---
>  arch/arm64/include/asm/kvm_host.h       | 4 ++++
>  arch/arm64/kvm/hyp/include/hyp/switch.h | 3 ++-
>  2 files changed, 6 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 8170c04fde91..e01e6de414f1 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -844,6 +844,10 @@ struct kvm_vcpu_arch {
>  
>  #define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)
>  
> +#define vcpu_sve_zcr_el1(vcpu)						\
> +	(unlikely(is_hyp_ctxt(vcpu)) ? __vcpu_sys_reg(vcpu, ZCR_EL2) :	\
> +				       __vcpu_sys_reg(vcpu, ZCR_EL1))
> +

I have the feeling this abstracts the access at the wrong level. It's
not that it gives the wrong result, but it hides the register and is
only concerned with the *value*.

In turn, it makes the helper unusable with the *write* side, as shown
in patch 7.

>  #define vcpu_sve_state_size(vcpu) ({					\
>  	size_t __size_ret;						\
>  	unsigned int __vcpu_vq;						\
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 5ecd2600d9df..71a93e336a0c 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -317,7 +317,8 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
>  	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
>  	__sve_restore_state(vcpu_sve_pffr(vcpu),
>  			    &vcpu->arch.ctxt.fp_regs.fpsr);
> -	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
> +
> +	write_sysreg_el1(vcpu_sve_zcr_el1(vcpu), SYS_ZCR);
>  }
>  
>  /*

I would instead propose the following:

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index aeb1c567dfad..2c3eff0031eb 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -845,9 +845,8 @@ struct kvm_vcpu_arch {
 
 #define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)
 
-#define vcpu_sve_zcr_el1(vcpu)						\
-	(unlikely(is_hyp_ctxt(vcpu)) ? __vcpu_sys_reg(vcpu, ZCR_EL2) :	\
-				       __vcpu_sys_reg(vcpu, ZCR_EL1))
+#define vcpu_sve_zcr_elx(vcpu)						\
+	(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
 
 #define vcpu_sve_state_size(vcpu) ({					\
 	size_t __size_ret;						\
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index bb2ef3166c63..947486a111e1 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -179,10 +179,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 			 * If the vCPU is in the hyp context then ZCR_EL1 is
 			 * loaded with its vEL2 counterpart.
 			 */
-			if (is_hyp_ctxt(vcpu))
-				__vcpu_sys_reg(vcpu, ZCR_EL2) = zcr;
-			else
-				__vcpu_sys_reg(vcpu, ZCR_EL1) = zcr;
+			__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
 
 			/*
 			 * Restore the VL that was saved when bound to the CPU,
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 0a6935a18490..ad8dec0b450b 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -330,7 +330,7 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
 		sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
 
-	write_sysreg_el1(vcpu_sve_zcr_el1(vcpu), SYS_ZCR);
+	write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
 }
 
 /*

which makes the helper select the correct guest register for the
context, and only that. In turn, the write side is much cleaner and
symmetry is restored.

Thanks,

	M.
Oliver Upton June 14, 2024, 8:08 p.m. UTC | #2
On Fri, Jun 14, 2024 at 12:14:30PM +0100, Marc Zyngier wrote:
> On Thu, 13 Jun 2024 21:17:46 +0100,
> Oliver Upton <oliver.upton@linux.dev> wrote:
> > 
> > Load the guest hypervisor's ZCR_EL2 into the corresponding EL1 register
> > when restoring SVE state, as ZCR_EL2 affects the VL in the hypervisor
> > context.
> > 
> > Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> > ---
> >  arch/arm64/include/asm/kvm_host.h       | 4 ++++
> >  arch/arm64/kvm/hyp/include/hyp/switch.h | 3 ++-
> >  2 files changed, 6 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > index 8170c04fde91..e01e6de414f1 100644
> > --- a/arch/arm64/include/asm/kvm_host.h
> > +++ b/arch/arm64/include/asm/kvm_host.h
> > @@ -844,6 +844,10 @@ struct kvm_vcpu_arch {
> >  
> >  #define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)
> >  
> > +#define vcpu_sve_zcr_el1(vcpu)						\
> > +	(unlikely(is_hyp_ctxt(vcpu)) ? __vcpu_sys_reg(vcpu, ZCR_EL2) :	\
> > +				       __vcpu_sys_reg(vcpu, ZCR_EL1))
> > +
> 
> I have the feeling this abstracts the access at the wrong level. It's
> not that it gives the wrong result, but it hides the register and is
> only concerned with the *value*.

Agreed, this was done out of hacky convenience for myself and I didn't
revisit.

> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index aeb1c567dfad..2c3eff0031eb 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -845,9 +845,8 @@ struct kvm_vcpu_arch {
>  
>  #define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)
>  
> -#define vcpu_sve_zcr_el1(vcpu)						\
> -	(unlikely(is_hyp_ctxt(vcpu)) ? __vcpu_sys_reg(vcpu, ZCR_EL2) :	\
> -				       __vcpu_sys_reg(vcpu, ZCR_EL1))
> +#define vcpu_sve_zcr_elx(vcpu)						\
> +	(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
>  
>  #define vcpu_sve_state_size(vcpu) ({					\
>  	size_t __size_ret;						\
> diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
> index bb2ef3166c63..947486a111e1 100644
> --- a/arch/arm64/kvm/fpsimd.c
> +++ b/arch/arm64/kvm/fpsimd.c
> @@ -179,10 +179,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
>  			 * If the vCPU is in the hyp context then ZCR_EL1 is
>  			 * loaded with its vEL2 counterpart.
>  			 */
> -			if (is_hyp_ctxt(vcpu))
> -				__vcpu_sys_reg(vcpu, ZCR_EL2) = zcr;
> -			else
> -				__vcpu_sys_reg(vcpu, ZCR_EL1) = zcr;
> +			__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
>  
>  			/*
>  			 * Restore the VL that was saved when bound to the CPU,
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 0a6935a18490..ad8dec0b450b 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -330,7 +330,7 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
>  	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
>  		sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
>  
> -	write_sysreg_el1(vcpu_sve_zcr_el1(vcpu), SYS_ZCR);
> +	write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
>  }
>  
>  /*
> 
> which makes the helper select the correct guest register for the
> context, and only that. In turn, the write side is much cleaner and
> symmetry is restored.

LGTM, I'll squash it in.
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8170c04fde91..e01e6de414f1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -844,6 +844,10 @@ struct kvm_vcpu_arch {
 
 #define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)
 
+#define vcpu_sve_zcr_el1(vcpu)						\
+	(unlikely(is_hyp_ctxt(vcpu)) ? __vcpu_sys_reg(vcpu, ZCR_EL2) :	\
+				       __vcpu_sys_reg(vcpu, ZCR_EL1))
+
 #define vcpu_sve_state_size(vcpu) ({					\
 	size_t __size_ret;						\
 	unsigned int __vcpu_vq;						\
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 5ecd2600d9df..71a93e336a0c 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -317,7 +317,8 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
 	__sve_restore_state(vcpu_sve_pffr(vcpu),
 			    &vcpu->arch.ctxt.fp_regs.fpsr);
-	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
+
+	write_sysreg_el1(vcpu_sve_zcr_el1(vcpu), SYS_ZCR);
 }
 
 /*