[v2,12/24] kvm: arm64: Bootstrap PSCI SMC handler in nVHE EL2

Message ID: 20201116204318.63987-13-dbrazdil@google.com (mailing list archive)
State: New, archived
Series: Opt-in always-on nVHE hypervisor

Commit Message

David Brazdil Nov. 16, 2020, 8:43 p.m. UTC
Add a handler of PSCI SMCs in nVHE hyp code. The handler is initialized
with the version used by the host's PSCI driver and the function IDs it
was configured with. If the SMC function ID matches one of the
configured PSCI calls (for v0.1) or falls into the PSCI function ID
range (for v0.2+), the SMC is handled by the PSCI handler. For now, all
SMCs return PSCI_RET_NOT_SUPPORTED.

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/include/asm/kvm_hyp.h     |   4 ++
 arch/arm64/kvm/arm.c                 |  14 ++++
 arch/arm64/kvm/hyp/nvhe/Makefile     |   2 +-
 arch/arm64/kvm/hyp/nvhe/hyp-main.c   |   6 +-
 arch/arm64/kvm/hyp/nvhe/psci-relay.c | 104 +++++++++++++++++++++++++++
 5 files changed, 128 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm64/kvm/hyp/nvhe/psci-relay.c

Comments

Marc Zyngier Nov. 23, 2020, 5:55 p.m. UTC | #1
On Mon, 16 Nov 2020 20:43:06 +0000,
David Brazdil <dbrazdil@google.com> wrote:
> 
> Add a handler of PSCI SMCs in nVHE hyp code. The handler is initialized
> with the version used by the host's PSCI driver and the function IDs it
> was configured with. If the SMC function ID matches one of the
> configured PSCI calls (for v0.1) or falls into the PSCI function ID
> range (for v0.2+), the SMC is handled by the PSCI handler. For now, all
> SMCs return PSCI_RET_NOT_SUPPORTED.
> 
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/include/asm/kvm_hyp.h     |   4 ++
>  arch/arm64/kvm/arm.c                 |  14 ++++
>  arch/arm64/kvm/hyp/nvhe/Makefile     |   2 +-
>  arch/arm64/kvm/hyp/nvhe/hyp-main.c   |   6 +-
>  arch/arm64/kvm/hyp/nvhe/psci-relay.c | 104 +++++++++++++++++++++++++++
>  5 files changed, 128 insertions(+), 2 deletions(-)
>  create mode 100644 arch/arm64/kvm/hyp/nvhe/psci-relay.c
> 
> diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
> index a3289071f3d8..95a2bbbcc7e1 100644
> --- a/arch/arm64/include/asm/kvm_hyp.h
> +++ b/arch/arm64/include/asm/kvm_hyp.h
> @@ -96,6 +96,10 @@ void deactivate_traps_vhe_put(void);
>  
>  u64 __guest_enter(struct kvm_vcpu *vcpu);
>  
> +#ifdef __KVM_NVHE_HYPERVISOR__
> +bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
> +#endif
> +
>  void __noreturn hyp_panic(void);
>  #ifdef __KVM_NVHE_HYPERVISOR__
>  void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index cdd7981ea560..7d2270eeecfb 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -19,6 +19,7 @@
>  #include <linux/kvm_irqfd.h>
>  #include <linux/irqbypass.h>
>  #include <linux/sched/stat.h>
> +#include <linux/psci.h>
>  #include <trace/events/kvm.h>
>  
>  #define CREATE_TRACE_POINTS
> @@ -1514,6 +1515,18 @@ static void init_cpu_logical_map(void)
>  		CHOOSE_NVHE_SYM(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
>  }
>  
> +static void init_psci_relay(void)
> +{
> +	extern u32 kvm_nvhe_sym(kvm_host_psci_version);
> +	extern u32 kvm_nvhe_sym(kvm_host_psci_function_id)[PSCI_FN_MAX];

nit: I'd rather have these outside of the function body.
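
[A minimal sketch of that suggestion, for illustration -- the two extern
declarations hoisted to file scope in arch/arm64/kvm/arm.c, with the
function body left as in the patch:]

/* Set up by the host in init_psci_relay() before the hyp is first run. */
extern u32 kvm_nvhe_sym(kvm_host_psci_version);
extern u32 kvm_nvhe_sym(kvm_host_psci_function_id)[PSCI_FN_MAX];

static void init_psci_relay(void)
{
	/* ... body unchanged ... */
}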

> +	int i;
> +
> +	CHOOSE_NVHE_SYM(kvm_host_psci_version) = psci_ops.get_version
> +		? psci_ops.get_version() : PSCI_VERSION(0, 0);

nit: please write this with an if/else construct, it will read a lot
better.
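
[For illustration, the same assignment written out as suggested:]

	if (psci_ops.get_version)
		CHOOSE_NVHE_SYM(kvm_host_psci_version) = psci_ops.get_version();
	else
		CHOOSE_NVHE_SYM(kvm_host_psci_version) = PSCI_VERSION(0, 0);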

> +	for (i = 0; i < PSCI_FN_MAX; ++i)
> +		CHOOSE_NVHE_SYM(kvm_host_psci_function_id)[i] = psci_get_function_id(i);

Either pick kvm_nvhe_sym(), or CHOOSE_NVHE_SYM(). Having both used
together is just an annoyance (and in this case there is nothing to
choose, really).
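
[For illustration, e.g. the loop with kvm_nvhe_sym() used throughout --
these symbols are only defined in the nVHE object, so both macros resolve
to the same thing here:]

	for (i = 0; i < PSCI_FN_MAX; ++i)
		kvm_nvhe_sym(kvm_host_psci_function_id)[i] = psci_get_function_id(i);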

> +}
> +
>  static int init_common_resources(void)
>  {
>  	return kvm_set_ipa_limit();
> @@ -1693,6 +1706,7 @@ static int init_hyp_mode(void)
>  	}
>  
>  	init_cpu_logical_map();
> +	init_psci_relay();
>  
>  	return 0;
>  
> diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
> index 2d842e009a40..bf62c8e42ab2 100644
> --- a/arch/arm64/kvm/hyp/nvhe/Makefile
> +++ b/arch/arm64/kvm/hyp/nvhe/Makefile
> @@ -7,7 +7,7 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__
>  ccflags-y := -D__KVM_NVHE_HYPERVISOR__
>  
>  obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
> -	 hyp-main.o hyp-smp.o
> +	 hyp-main.o hyp-smp.o psci-relay.o
>  obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
>  	 ../fpsimd.o ../hyp-entry.o
>  
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> index 71a17af05953..df4acb40dd39 100644
> --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> @@ -120,7 +120,11 @@ static void skip_host_instruction(void)
>  
>  static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
>  {
> -	default_host_smc_handler(host_ctxt);
> +	bool handled;
> +
> +	handled = kvm_host_psci_handler(host_ctxt);
> +	if (!handled)
> +		default_host_smc_handler(host_ctxt);
>  
>  	/*
>  	 * Unlike HVC, the return address of an SMC is the instruction's PC.
> diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> new file mode 100644
> index 000000000000..d75d3f896bfd
> --- /dev/null
> +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> @@ -0,0 +1,104 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2020 - Google LLC
> + * Author: David Brazdil <dbrazdil@google.com>
> + */
> +
> +#include <asm/kvm_asm.h>
> +#include <asm/kvm_hyp.h>
> +#include <asm/kvm_mmu.h>
> +#include <kvm/arm_hypercalls.h>
> +#include <linux/arm-smccc.h>
> +#include <linux/psci.h>
> +#include <kvm/arm_psci.h>
> +#include <uapi/linux/psci.h>
> +
> +/* Config options set by the host. */
> +u32 __ro_after_init kvm_host_psci_version = PSCI_VERSION(0, 0);
> +u32 __ro_after_init kvm_host_psci_function_id[PSCI_FN_MAX];
> +
> +static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
> +{
> +	return host_ctxt->regs.regs[0];
> +}
> +
> +static bool is_psci_0_1_call(u64 func_id)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(kvm_host_psci_function_id); ++i) {
> +		if (func_id == kvm_host_psci_function_id[i])
> +			return true;
> +	}
> +	return false;
> +}
> +
> +static bool is_psci_0_2_call(u64 func_id)
> +{
> +	/* SMCCC reserves IDs 0x00-1F with the given 32/64-bit base for PSCI. */
> +	return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) ||
> +	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
> +}
> +
> +static bool is_psci_call(u64 func_id)
> +{
> +	switch (kvm_host_psci_version) {
> +	case PSCI_VERSION(0, 0):
> +		return false;
> +	case PSCI_VERSION(0, 1):
> +		return is_psci_0_1_call(func_id);
> +	default:
> +		return is_psci_0_2_call(func_id);
> +	}
> +}
> +
> +static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
> +{
> +	return PSCI_RET_NOT_SUPPORTED;
> +}
> +
> +static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
> +{
> +	switch (func_id) {
> +	default:
> +		return PSCI_RET_NOT_SUPPORTED;
> +	}
> +}
> +
> +static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
> +{
> +	switch (func_id) {
> +	default:
> +		return psci_0_2_handler(func_id, host_ctxt);
> +	}
> +}
> +
> +bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
> +{
> +	u64 func_id = get_psci_func_id(host_ctxt);
> +	unsigned long ret;
> +
> +	if (!is_psci_call(func_id))
> +		return false;
> +
> +	switch (kvm_host_psci_version) {
> +	case PSCI_VERSION(0, 0):
> +		ret = PSCI_RET_NOT_SUPPORTED;

But isn't that way too late? No PSCI means that we cannot control the
way CPUs boot at all. I think we should completely fail the whole
Protected KVM business if we don't have PSCI.
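
[One possible shape of that, sketched for illustration only: have
init_psci_relay() fail when the host has no PSCI, and let init_hyp_mode()
propagate the error. The exact plumbing below is hypothetical, not part of
this series:]

static int init_psci_relay(void)
{
	int i;

	if (!psci_ops.get_version) {
		kvm_err("Cannot initialize hyp PSCI relay without host PSCI\n");
		return -ENODEV;
	}

	kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
	for (i = 0; i < PSCI_FN_MAX; ++i)
		kvm_nvhe_sym(kvm_host_psci_function_id)[i] = psci_get_function_id(i);

	return 0;
}

[The PSCI_VERSION(0, 0) branches in is_psci_call() and
kvm_host_psci_handler() would then be unreachable and could be dropped.]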

> +		break;
> +	case PSCI_VERSION(0, 1):
> +		ret = psci_0_1_handler(func_id, host_ctxt);
> +		break;
> +	case PSCI_VERSION(0, 2):
> +		ret = psci_0_2_handler(func_id, host_ctxt);
> +		break;
> +	default:
> +		ret = psci_1_0_handler(func_id, host_ctxt);
> +		break;
> +	}
> +
> +	host_ctxt->regs.regs[0] = ret;
> +	host_ctxt->regs.regs[1] = 0;
> +	host_ctxt->regs.regs[2] = 0;
> +	host_ctxt->regs.regs[3] = 0;
> +	return true;
> +}
> -- 
> 2.29.2.299.gdc1121823c-goog
> 
> 

Thanks,

	M.