
[v5,1/8] KVM: arm64: Pass struct kvm to per-EC handlers

Message ID 20210827101609.2808181-2-tabba@google.com (mailing list archive)
State New, archived
Series KVM: arm64: Fixed features for protected VMs

Commit Message

Fuad Tabba Aug. 27, 2021, 10:16 a.m. UTC
We need struct kvm to check whether a VM is protected, so that we
can pick the right handlers for it.

Mark the handler functions inline, since future code will call some
of them from the protected VM handlers.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h | 16 ++++++++--------
 arch/arm64/kvm/hyp/nvhe/switch.c        |  2 +-
 arch/arm64/kvm/hyp/vhe/switch.c         |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)
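
For illustration, a minimal sketch of where the series is heading with
this change: with struct kvm available, the hypervisor can pick a
different handler array for protected VMs. The kvm_vm_is_protected()
check and the pvm_exit_handlers array here are assumptions based on
later patches in the series, not part of this patch:

const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm)
{
	/* Protected VMs get a restricted set of per-EC handlers. */
	if (unlikely(kvm_vm_is_protected(kvm)))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}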

Comments

Andrew Jones Sept. 8, 2021, 12:33 p.m. UTC | #1
On Fri, Aug 27, 2021 at 11:16:02AM +0100, Fuad Tabba wrote:
> We need struct kvm to check whether a VM is protected, so that we
> can pick the right handlers for it.
> 
> Mark the handler functions inline, since future code will call some
> of them from the protected VM handlers.
> 
> Signed-off-by: Fuad Tabba <tabba@google.com>
> ---
>  arch/arm64/kvm/hyp/include/hyp/switch.h | 16 ++++++++--------
>  arch/arm64/kvm/hyp/nvhe/switch.c        |  2 +-
>  arch/arm64/kvm/hyp/vhe/switch.c         |  2 +-
>  3 files changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 0397606c0951..7cbff0ee59a5 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -163,7 +163,7 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
>   * If FP/SIMD is not implemented, handle the trap and inject an undefined
>   * instruction exception to the guest. Similarly for trapped SVE accesses.
>   */
> -static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	bool sve_guest, sve_host;
>  	u8 esr_ec;
> @@ -318,7 +318,7 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
>  
>  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
>  
> -static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	struct kvm_cpu_context *ctxt;
>  	u64 val;
> @@ -343,7 +343,7 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return true;
>  }
>  
> -static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
>  	    handle_tx2_tvm(vcpu))
> @@ -356,7 +356,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return false;
>  }
>  
> -static bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
>  	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
> @@ -365,7 +365,7 @@ static bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return false;
>  }
>  
> -static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	if (!__populate_fault_info(vcpu))
>  		return true;
> @@ -373,7 +373,7 @@ static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return false;
>  }
>  
> -static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	if (!__populate_fault_info(vcpu))
>  		return true;
> @@ -403,7 +403,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
>  
>  typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
>  
> -static const exit_handler_fn *kvm_get_exit_handler_array(void);
> +const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm);
>  
>  /*
>   * Allow the hypervisor to handle the exit with an exit handler if it has one.
> @@ -413,7 +413,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(void);
>   */
>  static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
> -	const exit_handler_fn *handlers = kvm_get_exit_handler_array();
> +	const exit_handler_fn *handlers = kvm_get_exit_handler_array(kern_hyp_va(vcpu->kvm));
>  	exit_handler_fn fn;
>  
>  	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
> diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
> index c52d580708e0..ebc0a5a4dd6a 100644
> --- a/arch/arm64/kvm/hyp/nvhe/switch.c
> +++ b/arch/arm64/kvm/hyp/nvhe/switch.c
> @@ -170,7 +170,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
>  	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
>  };
>  
> -static const exit_handler_fn *kvm_get_exit_handler_array(void)
> +const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm)
>  {
>  	return hyp_exit_handlers;
>  }
> diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
> index 0e0d342358f7..d823b089d3e9 100644
> --- a/arch/arm64/kvm/hyp/vhe/switch.c
> +++ b/arch/arm64/kvm/hyp/vhe/switch.c
> @@ -108,7 +108,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
>  	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
>  };
>  
> -static const exit_handler_fn *kvm_get_exit_handler_array(void)
> +const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm)
>  {
>  	return hyp_exit_handlers;
>  }
> -- 
> 2.33.0.259.gc128427fd7-goog
>

I have to admit my ignorance here. How do we link KVM? Are not
nvhe/switch.c and vhe/switch.c linked into the same kernel? If so,
then how does this compile after the static on
kvm_get_exit_handler_array() was removed?

Thanks,
drew
Marc Zyngier Sept. 20, 2021, 1:15 p.m. UTC | #2
On Fri, 27 Aug 2021 11:16:02 +0100,
Fuad Tabba <tabba@google.com> wrote:
> 
> We need struct kvm to check whether a VM is protected, so that we
> can pick the right handlers for it.
> 
> Mark the handler functions inline, since future code will call some
> of them from the protected VM handlers.
> 
> Signed-off-by: Fuad Tabba <tabba@google.com>
> ---
>  arch/arm64/kvm/hyp/include/hyp/switch.h | 16 ++++++++--------
>  arch/arm64/kvm/hyp/nvhe/switch.c        |  2 +-
>  arch/arm64/kvm/hyp/vhe/switch.c         |  2 +-
>  3 files changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 0397606c0951..7cbff0ee59a5 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -163,7 +163,7 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
>   * If FP/SIMD is not implemented, handle the trap and inject an undefined
>   * instruction exception to the guest. Similarly for trapped SVE accesses.
>   */
> -static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)

No, please don't do that. We already have function pointers for each
of these, so by doing that you are forcing the compiler to emit the
code *twice*.

Instead, call into the relevant EC handler by using the base array
that already does the non-protected handling.
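
For illustration, a minimal sketch of that suggestion: a protected-VM
handler does its own filtering, then defers to the corresponding entry
in the base array instead of an inlined copy of the handler. The name
pvm_handle_sys64 is hypothetical:

static bool pvm_handle_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/* pKVM-specific checks on the trapped access would go here. */

	/* Reuse the non-protected handling through the base array. */
	return hyp_exit_handlers[ESR_ELx_EC_SYS64](vcpu, exit_code);
}
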

>  {
>  	bool sve_guest, sve_host;
>  	u8 esr_ec;
> @@ -318,7 +318,7 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
>  
>  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
>  
> -static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	struct kvm_cpu_context *ctxt;
>  	u64 val;
> @@ -343,7 +343,7 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return true;
>  }
>  
> -static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
>  	    handle_tx2_tvm(vcpu))
> @@ -356,7 +356,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return false;
>  }
>  
> -static bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
>  	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
> @@ -365,7 +365,7 @@ static bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return false;
>  }
>  
> -static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	if (!__populate_fault_info(vcpu))
>  		return true;
> @@ -373,7 +373,7 @@ static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return false;
>  }
>  
> -static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	if (!__populate_fault_info(vcpu))
>  		return true;
> @@ -403,7 +403,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
>  
>  typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
>  
> -static const exit_handler_fn *kvm_get_exit_handler_array(void);
> +const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm);

Why? What breaks when this is static? There really shouldn't be
anything else referencing this array.

Thanks,

	M.
Fuad Tabba Sept. 20, 2021, 2:08 p.m. UTC | #3
Hi Marc,

> > diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> > index 0397606c0951..7cbff0ee59a5 100644
> > --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> > +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> > @@ -163,7 +163,7 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
> >   * If FP/SIMD is not implemented, handle the trap and inject an undefined
> >   * instruction exception to the guest. Similarly for trapped SVE accesses.
> >   */
> > -static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
> > +static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
>
> No, please don't do that. We already have function pointers for each
> of these, so by doing that you are forcing the compiler to emit the
> code *twice*.
>
> Instead, call into the relevant EC handler by using the base array
> that already does the non-protected handling.
>
...
> > -static const exit_handler_fn *kvm_get_exit_handler_array(void);
> > +const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm);
>
> Why? What breaks when this is static? There really shouldn't be
> anything else referencing this array.

For the two points above, the reason I did that is that later
patches call these functions from the newly added
arch/arm64/kvm/hyp/nvhe/sys_regs.c. That said, I think that the code
that calls them more naturally belongs in
arch/arm64/kvm/hyp/nvhe/switch.c instead.

I'll fix that, rebase on 5.15-rc2, and respin.

Thanks,
/fuad


> Thanks,
>
>         M.
>
> --
> Without deviation from the norm, progress is not possible.
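
For illustration, with the protected-VM dispatch confined to
arch/arm64/kvm/hyp/nvhe/switch.c as described above, the table might
look roughly like this. The pvm_* names are assumptions based on later
patches in the series; a NULL entry means the hypervisor has no handler
for that EC:

/* arch/arm64/kvm/hyp/nvhe/switch.c */
static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_SYS64]		= kvm_handle_pvm_sys64,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
};

A static kvm_get_exit_handler_array() can then return
pvm_exit_handlers for protected VMs and hyp_exit_handlers otherwise,
as sketched under the commit message above.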

Patch

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 0397606c0951..7cbff0ee59a5 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -163,7 +163,7 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
  * If FP/SIMD is not implemented, handle the trap and inject an undefined
  * instruction exception to the guest. Similarly for trapped SVE accesses.
  */
-static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	bool sve_guest, sve_host;
 	u8 esr_ec;
@@ -318,7 +318,7 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
 
 DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 
-static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	struct kvm_cpu_context *ctxt;
 	u64 val;
@@ -343,7 +343,7 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return true;
 }
 
-static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
 	    handle_tx2_tvm(vcpu))
@@ -356,7 +356,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
-static bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
 	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
@@ -365,7 +365,7 @@ static bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (!__populate_fault_info(vcpu))
 		return true;
@@ -373,7 +373,7 @@ static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
-static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (!__populate_fault_info(vcpu))
 		return true;
@@ -403,7 +403,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
 
-static const exit_handler_fn *kvm_get_exit_handler_array(void);
+const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm);
 
 /*
  * Allow the hypervisor to handle the exit with an exit handler if it has one.
@@ -413,7 +413,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(void);
  */
 static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	const exit_handler_fn *handlers = kvm_get_exit_handler_array();
+	const exit_handler_fn *handlers = kvm_get_exit_handler_array(kern_hyp_va(vcpu->kvm));
 	exit_handler_fn fn;
 
 	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c52d580708e0..ebc0a5a4dd6a 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -170,7 +170,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
 
-static const exit_handler_fn *kvm_get_exit_handler_array(void)
+const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm)
 {
 	return hyp_exit_handlers;
 }
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 0e0d342358f7..d823b089d3e9 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -108,7 +108,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
 
-static const exit_handler_fn *kvm_get_exit_handler_array(void)
+const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm)
 {
 	return hyp_exit_handlers;
 }