
[v3,13/15] KVM: arm64: Trap access to pVM restricted features

Message ID 20210719160346.609914-14-tabba@google.com
State New, archived
Series KVM: arm64: Fixed features for protected VMs

Commit Message

Fuad Tabba July 19, 2021, 4:03 p.m. UTC
Trap accesses to restricted features for VMs running in protected
mode.

Accesses to feature registers are emulated, and only supported
features are exposed to protected VMs.

Accesses to restricted registers, as well as restricted
instructions, are trapped, and an undefined exception is injected
into the protected guest, i.e., with EC = 0x0 (unknown reason).
This EC is the one used, according to the Arm Architecture
Reference Manual, for unallocated or undefined system registers
or instructions.

This only affects the functionality of protected VMs. It should
not affect non-protected VMs when KVM is running in protected
mode.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h |  3 ++
 arch/arm64/kvm/hyp/nvhe/switch.c        | 52 ++++++++++++++++++-------
 2 files changed, 41 insertions(+), 14 deletions(-)
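
The undefined exception mentioned above is injected with EC = 0x0.
As an illustrative sketch (not part of this diff; the helper name is
hypothetical, the ESR_ELx_* macros come from asm/esr.h), such an ESR
can be built along these lines:

/*
 * Build an "unknown reason" ESR, i.e. EC = 0x0, the EC the Arm ARM
 * uses for unallocated or undefined system registers or instructions.
 */
static u64 pvm_undef_esr(void)
{
	u64 esr = ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT;

	/* Mark the trapped instruction as 32 bits wide. */
	esr |= ESR_ELx_IL;

	return esr;
}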

Comments

Will Deacon Aug. 12, 2021, 9:53 a.m. UTC | #1
On Mon, Jul 19, 2021 at 05:03:44PM +0100, Fuad Tabba wrote:
> Trap accesses to restricted features for VMs running in protected
> mode.
> 
> Accesses to feature registers are emulated, and only supported
> features are exposed to protected VMs.
> 
> Accesses to restricted registers, as well as restricted
> instructions, are trapped, and an undefined exception is injected
> into the protected guest, i.e., with EC = 0x0 (unknown reason).
> This EC is the one used, according to the Arm Architecture
> Reference Manual, for unallocated or undefined system registers
> or instructions.
> 
> This only affects the functionality of protected VMs. It should
> not affect non-protected VMs when KVM is running in protected
> mode.
> 
> Signed-off-by: Fuad Tabba <tabba@google.com>
> ---
>  arch/arm64/kvm/hyp/include/hyp/switch.h |  3 ++
>  arch/arm64/kvm/hyp/nvhe/switch.c        | 52 ++++++++++++++++++-------
>  2 files changed, 41 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 5a2b89b96c67..8431f1514280 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -33,6 +33,9 @@
>  extern struct exception_table_entry __start___kvm_ex_table;
>  extern struct exception_table_entry __stop___kvm_ex_table;
>  
> +int kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu);
> +int kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu);
> +
>  /* Check whether the FP regs were dirtied while in the host-side run loop: */
>  static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
>  {
> diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
> index 36da423006bd..99bbbba90094 100644
> --- a/arch/arm64/kvm/hyp/nvhe/switch.c
> +++ b/arch/arm64/kvm/hyp/nvhe/switch.c
> @@ -158,30 +158,54 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
>  		write_sysreg(pmu->events_host, pmcntenset_el0);
>  }
>  
> +/**
> + * Handle system register accesses for protected VMs.
> + *
> + * Return 1 if handled, or 0 if not.
> + */
> +static int handle_pvm_sys64(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_vm_is_protected(kern_hyp_va(vcpu->kvm)) ?
> +			     kvm_handle_pvm_sys64(vcpu) :
> +			     0;
> +}

Why don't we move the kvm_vm_is_protected() check into
kvm_get_hyp_exit_handler() so we can avoid adding it to each handler
instead?

Either way:

Acked-by: Will Deacon <will@kernel.org>

Will
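
Will's suggestion might look something like the sketch below (an
illustration only, not code from this series); it reuses the
hyp_exit_handlers table and the kvm_handle_pvm_*() handlers from the
patch:

static exit_handle_fn kvm_get_hyp_exit_handler(struct kvm_vcpu *vcpu)
{
	/* Do the protected-VM check once, at handler-lookup time. */
	if (!kvm_vm_is_protected(kern_hyp_va(vcpu->kvm)))
		return NULL;

	return hyp_exit_handlers[ESR_ELx_EC(kvm_vcpu_get_esr(vcpu))];
}

With a lookup like this, the table entries could point straight at
kvm_handle_pvm_sys64() and kvm_handle_pvm_restricted(), and the
per-handler wrappers would no longer be needed.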

Patch

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 5a2b89b96c67..8431f1514280 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -33,6 +33,9 @@ 
 extern struct exception_table_entry __start___kvm_ex_table;
 extern struct exception_table_entry __stop___kvm_ex_table;
 
+int kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu);
+int kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu);
+
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 36da423006bd..99bbbba90094 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -158,30 +158,54 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 		write_sysreg(pmu->events_host, pmcntenset_el0);
 }
 
+/**
+ * Handle system register accesses for protected VMs.
+ *
+ * Return 1 if handled, or 0 if not.
+ */
+static int handle_pvm_sys64(struct kvm_vcpu *vcpu)
+{
+	return kvm_vm_is_protected(kern_hyp_va(vcpu->kvm)) ?
+			     kvm_handle_pvm_sys64(vcpu) :
+			     0;
+}
+
+/**
+ * Handle restricted feature accesses for protected VMs.
+ *
+ * Return 1 if handled, or 0 if not.
+ */
+static int handle_pvm_restricted(struct kvm_vcpu *vcpu)
+{
+	return kvm_vm_is_protected(kern_hyp_va(vcpu->kvm)) ?
+			     kvm_handle_pvm_restricted(vcpu) :
+			     0;
+}
+
 typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
 static exit_handle_fn hyp_exit_handlers[] = {
-	[0 ... ESR_ELx_EC_MAX]		= NULL,
+	[0 ... ESR_ELx_EC_MAX]		= handle_pvm_restricted,
 	[ESR_ELx_EC_WFx]		= NULL,
-	[ESR_ELx_EC_CP15_32]		= NULL,
-	[ESR_ELx_EC_CP15_64]		= NULL,
-	[ESR_ELx_EC_CP14_MR]		= NULL,
-	[ESR_ELx_EC_CP14_LS]		= NULL,
-	[ESR_ELx_EC_CP14_64]		= NULL,
+	[ESR_ELx_EC_CP15_32]		= handle_pvm_restricted,
+	[ESR_ELx_EC_CP15_64]		= handle_pvm_restricted,
+	[ESR_ELx_EC_CP14_MR]		= handle_pvm_restricted,
+	[ESR_ELx_EC_CP14_LS]		= handle_pvm_restricted,
+	[ESR_ELx_EC_CP14_64]		= handle_pvm_restricted,
 	[ESR_ELx_EC_HVC32]		= NULL,
 	[ESR_ELx_EC_SMC32]		= NULL,
 	[ESR_ELx_EC_HVC64]		= NULL,
 	[ESR_ELx_EC_SMC64]		= NULL,
-	[ESR_ELx_EC_SYS64]		= NULL,
-	[ESR_ELx_EC_SVE]		= NULL,
+	[ESR_ELx_EC_SYS64]		= handle_pvm_sys64,
+	[ESR_ELx_EC_SVE]		= handle_pvm_restricted,
 	[ESR_ELx_EC_IABT_LOW]		= NULL,
 	[ESR_ELx_EC_DABT_LOW]		= NULL,
-	[ESR_ELx_EC_SOFTSTP_LOW]	= NULL,
-	[ESR_ELx_EC_WATCHPT_LOW]	= NULL,
-	[ESR_ELx_EC_BREAKPT_LOW]	= NULL,
-	[ESR_ELx_EC_BKPT32]		= NULL,
-	[ESR_ELx_EC_BRK64]		= NULL,
-	[ESR_ELx_EC_FP_ASIMD]		= NULL,
+	[ESR_ELx_EC_SOFTSTP_LOW]	= handle_pvm_restricted,
+	[ESR_ELx_EC_WATCHPT_LOW]	= handle_pvm_restricted,
+	[ESR_ELx_EC_BREAKPT_LOW]	= handle_pvm_restricted,
+	[ESR_ELx_EC_BKPT32]		= handle_pvm_restricted,
+	[ESR_ELx_EC_BRK64]		= handle_pvm_restricted,
+	[ESR_ELx_EC_FP_ASIMD]		= handle_pvm_restricted,
 	[ESR_ELx_EC_PAC]		= NULL,
 };
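
For context, a table like this is consulted on each guest exit along
the following lines (an illustrative sketch with a hypothetical
function name, not part of this patch; per the comments above, a
handler returns 1 if it handled the exit and 0 otherwise):

static bool pvm_exit_handled(struct kvm_vcpu *vcpu)
{
	u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
	exit_handle_fn fn = hyp_exit_handlers[esr_ec];

	/* NULL entries fall through to the normal exit handling path. */
	return fn && fn(vcpu) == 1;
}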