[v4,02/13] KVM: arm64: Fix __pkvm_init_switch_pgd call ABI

Message ID: 20240529121251.1993135-3-ptosi@google.com (mailing list archive)
State: New
Series: KVM: arm64: Add support for hypervisor kCFI

Commit Message

Pierre-Clément Tosi May 29, 2024, 12:12 p.m. UTC
Fix the mismatch between the (incorrect) C signature, the C call site, and
the asm implementation by aligning all three on an API that passes the
parameters (pgd and SP) separately, instead of as a bundled struct.

Remove from the asm the now-unnecessary memory accesses performed while
the MMU is off. This simplifies the C caller (which no longer needs to
convert a VA struct pointer to a PA) and makes the code slightly more
robust, as the struct fields are now accessed from C and the call is
properly expressed to the C compiler (type checker, kCFI).

Fixes: f320bc742bc2 ("KVM: arm64: Prepare the creation of s1 mappings at EL2")
Signed-off-by: Pierre-Clément Tosi <ptosi@google.com>
---
 arch/arm64/include/asm/kvm_hyp.h   |  3 +--
 arch/arm64/kvm/hyp/nvhe/hyp-init.S | 17 +++++++++--------
 arch/arm64/kvm/hyp/nvhe/setup.c    |  4 ++--
 3 files changed, 12 insertions(+), 12 deletions(-)
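
For context, the three views being aligned disagreed as follows: the asm
routine took the PA of the init params struct in x0 and a continuation
function in x1 (loading pgd and SP from the struct), the C declaration
claimed five parameters, and the call site went through a pointer of yet
another, two-argument type. A before/after sketch of the call boundary
(illustrative only, not a standalone compilation unit; kernel types and
helpers assumed):

	/* Before: declaration, call-site pointer type and asm all disagree. */
	void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
				    phys_addr_t pgd, void *sp, void *cont_fn);

	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);

	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);	/* cast hides the mismatch */
	fn(__hyp_pa(params), __pkvm_init_finalise);

	/* After: one prototype shared by the declaration, the call site and the asm. */
	void __pkvm_init_switch_pgd(phys_addr_t pgd, void *sp, void (*fn)(void));

	typeof(__pkvm_init_switch_pgd) *fn;

	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(params->pgd_pa, (void *)params->stack_hyp_va, __pkvm_init_finalise);

With a single prototype in play, the compiler's type checking (and the kCFI
machinery this series enables at EL2) can flag future drift between the C
side and the asm implementation instead of letting the wrong values land in
registers.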

Comments

Will Deacon June 3, 2024, 2:22 p.m. UTC | #1
On Wed, May 29, 2024 at 01:12:08PM +0100, Pierre-Clément Tosi wrote:
> Fix the mismatch between the (incorrect) C signature, the C call site, and
> the asm implementation by aligning all three on an API that passes the
> parameters (pgd and SP) separately, instead of as a bundled struct.
> 
> Remove from the asm the now-unnecessary memory accesses performed while
> the MMU is off. This simplifies the C caller (which no longer needs to
> convert a VA struct pointer to a PA) and makes the code slightly more
> robust, as the struct fields are now accessed from C and the call is
> properly expressed to the C compiler (type checker, kCFI).
> 
> Fixes: f320bc742bc2 ("KVM: arm64: Prepare the creation of s1 mappings at EL2")
> Signed-off-by: Pierre-Clément Tosi <ptosi@google.com>
> ---
>  arch/arm64/include/asm/kvm_hyp.h   |  3 +--
>  arch/arm64/kvm/hyp/nvhe/hyp-init.S | 17 +++++++++--------
>  arch/arm64/kvm/hyp/nvhe/setup.c    |  4 ++--
>  3 files changed, 12 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
> index 3e80464f8953..58b5a2b14d88 100644
> --- a/arch/arm64/include/asm/kvm_hyp.h
> +++ b/arch/arm64/include/asm/kvm_hyp.h
> @@ -123,8 +123,7 @@ void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
>  #endif
>  
>  #ifdef __KVM_NVHE_HYPERVISOR__
> -void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
> -			    phys_addr_t pgd, void *sp, void *cont_fn);
> +void __pkvm_init_switch_pgd(phys_addr_t pgd, void *sp, void (*fn)(void));
>  int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
>  		unsigned long *per_cpu_base, u32 hyp_va_bits);
>  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> index 2994878d68ea..d859c4de06b6 100644
> --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> @@ -265,33 +265,34 @@ alternative_else_nop_endif
>  
>  SYM_CODE_END(__kvm_handle_stub_hvc)
>  
> +/*
> + * void __pkvm_init_switch_pgd(phys_addr_t pgd, void *sp, void (*fn)(void));
> + */
>  SYM_FUNC_START(__pkvm_init_switch_pgd)
>  	/* Turn the MMU off */
>  	pre_disable_mmu_workaround
> -	mrs	x2, sctlr_el2
> -	bic	x3, x2, #SCTLR_ELx_M
> +	mrs	x9, sctlr_el2
> +	bic	x3, x9, #SCTLR_ELx_M

This is fine, but there's no need to jump all the way to x9 for the
register allocation. I think it would be neatest to re-jig the function
so it uses x4 here for the sctlr and then uses x5 later for the ttbr.

> diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
> index 859f22f754d3..1cbd2c78f7a1 100644
> --- a/arch/arm64/kvm/hyp/nvhe/setup.c
> +++ b/arch/arm64/kvm/hyp/nvhe/setup.c
> @@ -316,7 +316,7 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
>  {
>  	struct kvm_nvhe_init_params *params;
>  	void *virt = hyp_phys_to_virt(phys);
> -	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
> +	typeof(__pkvm_init_switch_pgd) *fn;
>  	int ret;
>  
>  	BUG_ON(kvm_check_pvm_sysreg_table());
> @@ -340,7 +340,7 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
>  	/* Jump in the idmap page to switch to the new page-tables */
>  	params = this_cpu_ptr(&kvm_init_params);
>  	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
> -	fn(__hyp_pa(params), __pkvm_init_finalise);
> +	fn(params->pgd_pa, (void *)params->stack_hyp_va, __pkvm_init_finalise);

Why not have the prototype of __pkvm_init_switch_pgd() take the SP as
an 'unsigned long' so that you can avoid this cast altogether?

Will
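
(For illustration only, not part of the posted patch: the variant suggested
above would look roughly as follows, assuming stack_hyp_va is an unsigned
long, which the (void *) cast in the posted call site suggests.)

	void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
				    void (*fn)(void));

	typeof(__pkvm_init_switch_pgd) *fn;

	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);	/* no cast needed */

The asm side would be unchanged either way, since the stack pointer value
still arrives in x1.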

Patch

diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 3e80464f8953..58b5a2b14d88 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -123,8 +123,7 @@ void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 #endif
 
 #ifdef __KVM_NVHE_HYPERVISOR__
-void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
-			    phys_addr_t pgd, void *sp, void *cont_fn);
+void __pkvm_init_switch_pgd(phys_addr_t pgd, void *sp, void (*fn)(void));
 int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
 		unsigned long *per_cpu_base, u32 hyp_va_bits);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 2994878d68ea..d859c4de06b6 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -265,33 +265,34 @@ alternative_else_nop_endif
 
 SYM_CODE_END(__kvm_handle_stub_hvc)
 
+/*
+ * void __pkvm_init_switch_pgd(phys_addr_t pgd, void *sp, void (*fn)(void));
+ */
 SYM_FUNC_START(__pkvm_init_switch_pgd)
 	/* Turn the MMU off */
 	pre_disable_mmu_workaround
-	mrs	x2, sctlr_el2
-	bic	x3, x2, #SCTLR_ELx_M
+	mrs	x9, sctlr_el2
+	bic	x3, x9, #SCTLR_ELx_M
 	msr	sctlr_el2, x3
 	isb
 
 	tlbi	alle2
 
 	/* Install the new pgtables */
-	ldr	x3, [x0, #NVHE_INIT_PGD_PA]
-	phys_to_ttbr x4, x3
+	phys_to_ttbr x4, x0
 alternative_if ARM64_HAS_CNP
 	orr	x4, x4, #TTBR_CNP_BIT
 alternative_else_nop_endif
 	msr	ttbr0_el2, x4
 
 	/* Set the new stack pointer */
-	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
-	mov	sp, x0
+	mov	sp, x1
 
 	/* And turn the MMU back on! */
 	dsb	nsh
 	isb
-	set_sctlr_el2	x2
-	ret	x1
+	set_sctlr_el2	x9
+	ret	x2
 SYM_FUNC_END(__pkvm_init_switch_pgd)
 
 	.popsection
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 859f22f754d3..1cbd2c78f7a1 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -316,7 +316,7 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
 {
 	struct kvm_nvhe_init_params *params;
 	void *virt = hyp_phys_to_virt(phys);
-	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
+	typeof(__pkvm_init_switch_pgd) *fn;
 	int ret;
 
 	BUG_ON(kvm_check_pvm_sysreg_table());
@@ -340,7 +340,7 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
 	/* Jump in the idmap page to switch to the new page-tables */
 	params = this_cpu_ptr(&kvm_init_params);
 	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
-	fn(__hyp_pa(params), __pkvm_init_finalise);
+	fn(params->pgd_pa, (void *)params->stack_hyp_va, __pkvm_init_finalise);
 
 	unreachable();
 }