
[RFC,4/7] KVM: Refactor Assembly-code to access vCPU gp-registers through a macro

Message ID: 20240911143421.85612-5-faresx@amazon.de (mailing list archive)
State: New
Series: support for mm-local memory allocations and use it

Commit Message

Fares Mehanna Sept. 11, 2024, 2:34 p.m. UTC
Right now, assembly code accesses the vCPU gp-regs directly in the context struct
"struct kvm_cpu_context" using the "CPU_XREG_OFFSET()" macro.

Since we want to move the gp-regs to dynamically allocated memory, we can no
longer assume that they will be embedded in the context struct, so split the
access into two steps.

The first is to get a pointer to the gp-regs from the context using the new
assembly macro "get_ctxt_gp_regs".

The second is to access the registers relative to "struct user_pt_regs"
itself, by removing the "CPU_USER_PT_REGS" offset from the access macro
"CPU_XREG_OFFSET()".

Also update variable names and comments where appropriate.

Signed-off-by: Fares Mehanna <faresx@amazon.de>
---
 arch/arm64/include/asm/kvm_asm.h | 48 +++++++++++++++++---------------
 arch/arm64/kvm/hyp/entry.S       | 15 ++++++++++
 arch/arm64/kvm/hyp/nvhe/host.S   | 20 ++++++++++---
 3 files changed, 57 insertions(+), 26 deletions(-)

Patch

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 2181a11b9d92..fa4fb642a5f5 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -313,6 +313,10 @@  void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
 	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
 
+.macro get_ctxt_gp_regs ctxt, regs
+	add	\regs, \ctxt, #CPU_USER_PT_REGS
+.endm
+
 /*
  * KVM extable for unexpected exceptions.
  * Create a struct kvm_exception_table_entry output to a section that can be
@@ -329,7 +333,7 @@  void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
 	.popsection
 .endm
 
-#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
+#define CPU_XREG_OFFSET(x)	(8 * (x))
 #define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
 #define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)
 
@@ -337,34 +341,34 @@  void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
  * We treat x18 as callee-saved as the host may use it as a platform
  * register (e.g. for shadow call stack).
  */
-.macro save_callee_saved_regs ctxt
-	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
-	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
-	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
-	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
-	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
-	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
-	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.macro save_callee_saved_regs regs
+	str	x18,      [\regs, #CPU_XREG_OFFSET(18)]
+	stp	x19, x20, [\regs, #CPU_XREG_OFFSET(19)]
+	stp	x21, x22, [\regs, #CPU_XREG_OFFSET(21)]
+	stp	x23, x24, [\regs, #CPU_XREG_OFFSET(23)]
+	stp	x25, x26, [\regs, #CPU_XREG_OFFSET(25)]
+	stp	x27, x28, [\regs, #CPU_XREG_OFFSET(27)]
+	stp	x29, lr,  [\regs, #CPU_XREG_OFFSET(29)]
 .endm
 
-.macro restore_callee_saved_regs ctxt
-	// We require \ctxt is not x18-x28
-	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
-	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
-	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
-	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
-	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
-	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
-	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.macro restore_callee_saved_regs regs
+	// We require \regs is not x18-x28
+	ldr	x18,      [\regs, #CPU_XREG_OFFSET(18)]
+	ldp	x19, x20, [\regs, #CPU_XREG_OFFSET(19)]
+	ldp	x21, x22, [\regs, #CPU_XREG_OFFSET(21)]
+	ldp	x23, x24, [\regs, #CPU_XREG_OFFSET(23)]
+	ldp	x25, x26, [\regs, #CPU_XREG_OFFSET(25)]
+	ldp	x27, x28, [\regs, #CPU_XREG_OFFSET(27)]
+	ldp	x29, lr,  [\regs, #CPU_XREG_OFFSET(29)]
 .endm
 
-.macro save_sp_el0 ctxt, tmp
+.macro save_sp_el0 regs, tmp
 	mrs	\tmp,	sp_el0
-	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
+	str	\tmp,	[\regs, #CPU_SP_EL0_OFFSET]
 .endm
 
-.macro restore_sp_el0 ctxt, tmp
-	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
+.macro restore_sp_el0 regs, tmp
+	ldr	\tmp,	  [\regs, #CPU_SP_EL0_OFFSET]
 	msr	sp_el0, \tmp
 .endm
 
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 4433a234aa9b..628a123bcdc1 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -28,6 +28,9 @@  SYM_FUNC_START(__guest_enter)
 
 	adr_this_cpu x1, kvm_hyp_ctxt, x2
 
+	// Get gp-regs pointer from the context
+	get_ctxt_gp_regs x1, x1
+
 	// Store the hyp regs
 	save_callee_saved_regs x1
 
@@ -62,6 +65,9 @@  alternative_else_nop_endif
 	// when this feature is enabled for kernel code.
 	ptrauth_switch_to_guest x29, x0, x1, x2
 
+	// Get gp-regs pointer from the context
+	get_ctxt_gp_regs x29, x29
+
 	// Restore the guest's sp_el0
 	restore_sp_el0 x29, x0
 
@@ -108,6 +114,7 @@  SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
 	// current state is saved to the guest context but it will only be
 	// accurate if the guest had been completely restored.
 	adr_this_cpu x0, kvm_hyp_ctxt, x1
+	get_ctxt_gp_regs x0, x0
 	adr_l	x1, hyp_panic
 	str	x1, [x0, #CPU_XREG_OFFSET(30)]
 
@@ -120,6 +127,7 @@  SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// vcpu x0-x1 on the stack
 
 	add	x1, x1, #VCPU_CONTEXT
+	get_ctxt_gp_regs x1, x1
 
 	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 
@@ -145,6 +153,10 @@  SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// Store the guest's sp_el0
 	save_sp_el0	x1, x2
 
+	// Recover vCPU context to x1
+	get_vcpu_ptr    x1, x2
+	add     x1, x1, #VCPU_CONTEXT
+
 	adr_this_cpu x2, kvm_hyp_ctxt, x3
 
 	// Macro ptrauth_switch_to_hyp format:
@@ -157,6 +169,9 @@  SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
 	mte_switch_to_hyp x1, x2, x3
 
+	// Get gp-regs pointer from the context
+	get_ctxt_gp_regs x2, x2
+
 	// Restore hyp's sp_el0
 	restore_sp_el0 x2, x3
 
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 3d610fc51f4d..31afa7396294 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -17,6 +17,12 @@ 
 SYM_FUNC_START(__host_exit)
 	get_host_ctxt	x0, x1
 
+	/* Keep host context in x1 */
+	mov	x1, x0
+
+	/* Get gp-regs pointer from the context */
+	get_ctxt_gp_regs x0, x0
+
 	/* Store the host regs x2 and x3 */
 	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]
 
@@ -36,7 +42,10 @@  SYM_FUNC_START(__host_exit)
 	/* Store the host regs x18-x29, lr */
 	save_callee_saved_regs x0
 
-	/* Save the host context pointer in x29 across the function call */
+	/* Save the host context pointer in x28 across the function call */
+	mov	x28, x1
+
+	/* Save the host gp-regs pointer in x29 across the function call */
 	mov	x29, x0
 
 #ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
@@ -46,7 +55,7 @@  alternative_else_nop_endif
 
 alternative_if ARM64_KVM_PROTECTED_MODE
 	/* Save kernel ptrauth keys. */
-	add x18, x29, #CPU_APIAKEYLO_EL1
+	add x18, x28, #CPU_APIAKEYLO_EL1
 	ptrauth_save_state x18, x19, x20
 
 	/* Use hyp keys. */
@@ -58,6 +67,7 @@  alternative_else_nop_endif
 __skip_pauth_save:
 #endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
 
+	mov	x0, x28
 	bl	handle_trap
 
 __host_enter_restore_full:
@@ -68,7 +78,7 @@  b __skip_pauth_restore
 alternative_else_nop_endif
 
 alternative_if ARM64_KVM_PROTECTED_MODE
-	add x18, x29, #CPU_APIAKEYLO_EL1
+	add x18, x28, #CPU_APIAKEYLO_EL1
 	ptrauth_restore_state x18, x19, x20
 alternative_else_nop_endif
 __skip_pauth_restore:
@@ -101,7 +111,8 @@  SYM_FUNC_END(__host_exit)
  * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
  */
 SYM_FUNC_START(__host_enter)
-	mov	x29, x0
+	mov	x28, x0
+	get_ctxt_gp_regs x0, x29
 	b	__host_enter_restore_full
 SYM_FUNC_END(__host_enter)
 
@@ -141,6 +152,7 @@  SYM_FUNC_START(__hyp_do_panic)
 
 	/* Enter the host, conditionally restoring the host context. */
 	cbz	x29, __host_enter_without_restoring
+	get_ctxt_gp_regs x29, x29
 	b	__host_enter_for_panic
 SYM_FUNC_END(__hyp_do_panic)