
[v4,12/19] KVM: arm64: nVHE: Switch to hyp context for EL2

Message ID 20200915104643.2543892-13-ascull@google.com (mailing list archive)
State New, archived
Series Introduce separate nVHE hyp context

Commit Message

Andrew Scull Sept. 15, 2020, 10:46 a.m. UTC
Save and restore the host context when switching to and from hyp. This
gives hyp its own context, which the host will not see, as a step
towards a full trust boundary between the two.

SP_EL0 and pointer authentication keys are currently shared between the
host and hyp so don't need to be switched yet.

Signed-off-by: Andrew Scull <ascull@google.com>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h |  2 +
 arch/arm64/kvm/hyp/nvhe/Makefile        |  2 +-
 arch/arm64/kvm/hyp/nvhe/host.S          | 68 ++++++++++++++++++-------
 arch/arm64/kvm/hyp/nvhe/hyp-main.c      | 36 +++++++++++++
 4 files changed, 89 insertions(+), 19 deletions(-)
 create mode 100644 arch/arm64/kvm/hyp/nvhe/hyp-main.c

Comments

Marc Zyngier Sept. 16, 2020, 1:14 p.m. UTC | #1
Hi Andrew,

A few very minor comments below, which we can sort in a subsequent
patch:

On Tue, 15 Sep 2020 11:46:36 +0100,
Andrew Scull <ascull@google.com> wrote:
> 
> Save and restore the host context when switching to and from hyp. This
> gives hyp its own context that the host will not see as a step towards a
> full trust boundary between the two.
> 
> SP_EL0 and pointer authentication keys are currently shared between the
> host and hyp so don't need to be switched yet.
> 
> Signed-off-by: Andrew Scull <ascull@google.com>
> ---
>  arch/arm64/kvm/hyp/include/hyp/switch.h |  2 +
>  arch/arm64/kvm/hyp/nvhe/Makefile        |  2 +-
>  arch/arm64/kvm/hyp/nvhe/host.S          | 68 ++++++++++++++++++-------
>  arch/arm64/kvm/hyp/nvhe/hyp-main.c      | 36 +++++++++++++
>  4 files changed, 89 insertions(+), 19 deletions(-)
>  create mode 100644 arch/arm64/kvm/hyp/nvhe/hyp-main.c
> 
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 821721b78ad9..4536b50ddc06 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -372,6 +372,8 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
>  	ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;                   \
>  } while(0)
>  
> +DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
> +

Why do we need this declaration? Isn't the one from patch 8 enough? Or
is it just a spurious addition (things seem to compile fine without it)?

>  static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_cpu_context *ctxt;
> diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
> index ddf98eb07b9d..46c89e8c30bc 100644
> --- a/arch/arm64/kvm/hyp/nvhe/Makefile
> +++ b/arch/arm64/kvm/hyp/nvhe/Makefile
> @@ -6,7 +6,7 @@
>  asflags-y := -D__KVM_NVHE_HYPERVISOR__
>  ccflags-y := -D__KVM_NVHE_HYPERVISOR__
>  
> -obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
> +obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-main.o
>  obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
>  	 ../fpsimd.o ../hyp-entry.o
>  
> diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
> index 9ab7814e6114..d26e41773dc4 100644
> --- a/arch/arm64/kvm/hyp/nvhe/host.S
> +++ b/arch/arm64/kvm/hyp/nvhe/host.S
> @@ -12,6 +12,55 @@
>  
>  	.text
>  
> +SYM_FUNC_START(__host_exit)
> +	stp	x0, x1, [sp, #-16]!
> +
> +	get_host_ctxt	x0, x1
> +
> +	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
> +

It isn't obvious to me why we mess with PAN here. nVHE doesn't have an
EL0, and the spec says that PAN only exists at EL2 when HCR_EL2.E2H==1.

> +	/* Store the host regs x2 and x3 */
> +	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]
> +
> +	/* Retrieve the host regs x0-x1 from the stack */
> +	ldp	x2, x3, [sp], #16	// x0, x1
> +
> +	/* Store the host regs x0-x1 and x4-x17 */
> +	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
> +	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
> +	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
> +	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
> +	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
> +	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
> +	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
> +	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]
> +
> +	/* Store the host regs x18-x29, lr */
> +	save_callee_saved_regs x0
> +
> +	/* Save the host context pointer in x29 across the function call */
> +	mov	x29, x0
> +	bl	handle_trap
> +
> +	/* Restore host regs x0-x17 */
> +	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
> +	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
> +	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
> +	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
> +	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
> +	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
> +	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
> +	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
> +	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]
> +
> +	/* Restore host regs x18-x29, lr */
> +	restore_callee_saved_regs x29
> +
> +	/* Do not touch any register after this! */
> +	eret
> +	sb
> +SYM_FUNC_END(__host_exit)
> +
>  SYM_FUNC_START(__hyp_do_panic)
>  	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
>  		      PSR_MODE_EL1h)
> @@ -34,7 +83,7 @@ SYM_FUNC_END(__hyp_do_panic)
>  
>  	/* Check for a stub HVC call */
>  	cmp	x0, #HVC_STUB_HCALL_NR
> -	b.hs	1f
> +	b.hs	__host_exit
>  
>  	/*
>  	 * Compute the idmap address of __kvm_handle_stub_hvc and
> @@ -50,23 +99,6 @@ SYM_FUNC_END(__hyp_do_panic)
>  	/* x5 = __pa(x5) */
>  	sub	x5, x5, x6
>  	br	x5
> -
> -1:
> -	/*
> -	 * Shuffle the parameters before calling the function
> -	 * pointed to in x0. Assumes parameters in x[1,2,3].
> -	 */
> -	kern_hyp_va	x0
> -	str	lr, [sp, #-16]!
> -	mov	lr, x0
> -	mov	x0, x1
> -	mov	x1, x2
> -	mov	x2, x3
> -	blr	lr
> -	ldr	lr, [sp], #16
> -
> -	eret
> -	sb
>  .L__vect_end\@:
>  .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
>  	.error "host_el1_sync_vect larger than vector entry"
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> new file mode 100644
> index 000000000000..570c3896f42e
> --- /dev/null
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> @@ -0,0 +1,36 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2020 - Google Inc
> + * Author: Andrew Scull <ascull@google.com>
> + */
> +
> +#include <hyp/switch.h>
> +
> +#include <asm/kvm_asm.h>
> +#include <asm/kvm_emulate.h>
> +#include <asm/kvm_host.h>
> +#include <asm/kvm_hyp.h>
> +#include <asm/kvm_mmu.h>
> +
> +typedef unsigned long (*hypcall_fn_t)
> +	(unsigned long, unsigned long, unsigned long);
> +
> +void handle_trap(struct kvm_cpu_context *host_ctxt)

This probably needs to have the asmlinkage attribute, although I'm not
sure we use it for anything on arm64 just yet.
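
Something along these lines would do it (untested sketch; asmlinkage has
no ABI effect on arm64 today, so it mainly documents that the function is
an assembly entry point):

	/*
	 * Sketch: handle_trap() is only ever entered from the __host_exit
	 * assembly above, so mark it as an asm-called function.
	 */
	asmlinkage void handle_trap(struct kvm_cpu_context *host_ctxt);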

Thanks,

	M.

Patch

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 821721b78ad9..4536b50ddc06 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -372,6 +372,8 @@  static inline bool esr_is_ptrauth_trap(u32 esr)
 	ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;                   \
 } while(0)
 
+DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+
 static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *ctxt;
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index ddf98eb07b9d..46c89e8c30bc 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -6,7 +6,7 @@ 
 asflags-y := -D__KVM_NVHE_HYPERVISOR__
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__
 
-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
+obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-main.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
 	 ../fpsimd.o ../hyp-entry.o
 
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 9ab7814e6114..d26e41773dc4 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -12,6 +12,55 @@ 
 
 	.text
 
+SYM_FUNC_START(__host_exit)
+	stp	x0, x1, [sp, #-16]!
+
+	get_host_ctxt	x0, x1
+
+	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+
+	/* Store the host regs x2 and x3 */
+	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]
+
+	/* Retrieve the host regs x0-x1 from the stack */
+	ldp	x2, x3, [sp], #16	// x0, x1
+
+	/* Store the host regs x0-x1 and x4-x17 */
+	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
+	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
+	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
+	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
+	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
+	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
+	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
+	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]
+
+	/* Store the host regs x18-x29, lr */
+	save_callee_saved_regs x0
+
+	/* Save the host context pointer in x29 across the function call */
+	mov	x29, x0
+	bl	handle_trap
+
+	/* Restore host regs x0-x17 */
+	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
+	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
+	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
+	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
+	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
+	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
+	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
+	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
+	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]
+
+	/* Restore host regs x18-x29, lr */
+	restore_callee_saved_regs x29
+
+	/* Do not touch any register after this! */
+	eret
+	sb
+SYM_FUNC_END(__host_exit)
+
 SYM_FUNC_START(__hyp_do_panic)
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
 		      PSR_MODE_EL1h)
@@ -34,7 +83,7 @@  SYM_FUNC_END(__hyp_do_panic)
 
 	/* Check for a stub HVC call */
 	cmp	x0, #HVC_STUB_HCALL_NR
-	b.hs	1f
+	b.hs	__host_exit
 
 	/*
 	 * Compute the idmap address of __kvm_handle_stub_hvc and
@@ -50,23 +99,6 @@  SYM_FUNC_END(__hyp_do_panic)
 	/* x5 = __pa(x5) */
 	sub	x5, x5, x6
 	br	x5
-
-1:
-	/*
-	 * Shuffle the parameters before calling the function
-	 * pointed to in x0. Assumes parameters in x[1,2,3].
-	 */
-	kern_hyp_va	x0
-	str	lr, [sp, #-16]!
-	mov	lr, x0
-	mov	x0, x1
-	mov	x1, x2
-	mov	x2, x3
-	blr	lr
-	ldr	lr, [sp], #16
-
-	eret
-	sb
 .L__vect_end\@:
 .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
 	.error "host_el1_sync_vect larger than vector entry"
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
new file mode 100644
index 000000000000..570c3896f42e
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -0,0 +1,36 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 - Google Inc
+ * Author: Andrew Scull <ascull@google.com>
+ */
+
+#include <hyp/switch.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+typedef unsigned long (*hypcall_fn_t)
+	(unsigned long, unsigned long, unsigned long);
+
+void handle_trap(struct kvm_cpu_context *host_ctxt)
+{
+	u64 esr = read_sysreg_el2(SYS_ESR);
+	hypcall_fn_t func;
+	unsigned long ret;
+
+	if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64)
+		hyp_panic();
+
+	/*
+	 * __kvm_call_hyp takes a pointer in the host address space and
+	 * up to three arguments.
+	 */
+	func = (hypcall_fn_t)kern_hyp_va(host_ctxt->regs.regs[0]);
+	ret = func(host_ctxt->regs.regs[1],
+		   host_ctxt->regs.regs[2],
+		   host_ctxt->regs.regs[3]);
+	host_ctxt->regs.regs[0] = ret;
+}
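
For context, the host side of this calling convention is the existing
kvm_call_hyp() machinery in arch/arm64/kvm/arm.c: the hyp function's
kernel address goes in x0 and up to three arguments go in x1-x3 before
the HVC that lands in handle_trap() above. A rough sketch (the wrapper
function below is illustrative only, not part of this patch):

	#include <linux/kvm_host.h>	/* struct kvm_vcpu, kvm_call_hyp_ret() */
	#include <asm/kvm_asm.h>	/* __kvm_vcpu_run() */

	/*
	 * Host-side sketch (nVHE path): kvm_call_hyp_ret() issues the HVC
	 * with the hyp function pointer in x0 and its arguments in x1-x3;
	 * handle_trap() converts x0 with kern_hyp_va(), calls the function
	 * and returns the result to the host in x0.
	 */
	static int run_vcpu_at_hyp(struct kvm_vcpu *vcpu)
	{
		return kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
	}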