[1/6] arm64: Rename the VHE switch to "finalise_el2"

Message ID 20220627151412.1496361-2-maz@kernel.org
Series arm64: Disabling SVE/SME from the command-line

Commit Message

Marc Zyngier June 27, 2022, 3:14 p.m. UTC
As we are about to perform a lot more in 'mutate_to_vhe' than
we currently do, this function really becomes the point where
we finalise the basic EL2 configuration.

Reflect this into the code by renaming a bunch of things:
- HVC_VHE_RESTART -> HVC_FINALISE_EL2
- switch_to_vhe -> finalise_el2
- mutate_to_vhe -> __finalise_el2

No functional changes.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 Documentation/virt/kvm/arm/hyp-abi.rst |  2 +-
 arch/arm64/include/asm/virt.h          |  4 ++--
 arch/arm64/kernel/head.S               |  6 +++---
 arch/arm64/kernel/hyp-stub.S           | 21 ++++++++++-----------
 arch/arm64/kernel/sleep.S              |  2 +-
 5 files changed, 17 insertions(+), 18 deletions(-)
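
[Editor's note, not part of the patch: the stub call sequence itself is
unchanged by this rename, only the request name differs. A minimal sketch
of what finalise_el2 / __cpu_stick_to_vhe issue below, when the CPU booted
at EL2 but is currently running at EL1:

	mov	x0, #HVC_FINALISE_EL2	// stub request, defined in asm/virt.h
	hvc	#0			// trap to the EL2 stub vectors (elx_sync),
					// which dispatches to __finalise_el2

The stub either upgrades the kernel to EL2/VHE and returns there, or leaves
the exception level as it was.]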

Patch

diff --git a/Documentation/virt/kvm/arm/hyp-abi.rst b/Documentation/virt/kvm/arm/hyp-abi.rst
index 4d43fbc25195..0967acbff560 100644
--- a/Documentation/virt/kvm/arm/hyp-abi.rst
+++ b/Documentation/virt/kvm/arm/hyp-abi.rst
@@ -60,7 +60,7 @@  these functions (see arch/arm{,64}/include/asm/virt.h):
 
 * ::
 
-    x0 = HVC_VHE_RESTART (arm64 only)
+    x0 = HVC_FINALISE_EL2 (arm64 only)
 
   Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
   the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 0e80db4327b6..dec6eee0eda5 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -36,9 +36,9 @@ 
 #define HVC_RESET_VECTORS 2
 
 /*
- * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
  */
-#define HVC_VHE_RESTART	3
+#define HVC_FINALISE_EL2	3
 
 /* Max number of HYP stub hypercalls */
 #define HVC_STUB_HCALL_NR 4
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 6a98f1a38c29..f8550a939a6e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -463,7 +463,7 @@  SYM_FUNC_START_LOCAL(__primary_switched)
 	ret					// to __primary_switch()
 0:
 #endif
-	bl	switch_to_vhe			// Prefer VHE if possible
+	bl	finalise_el2			// Prefer VHE if possible
 	ldp	x29, x30, [sp], #16
 	bl	start_kernel
 	ASM_BUG()
@@ -553,7 +553,7 @@  SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 	eret
 
 __cpu_stick_to_vhe:
-	mov	x0, #HVC_VHE_RESTART
+	mov	x0, #HVC_FINALISE_EL2
 	hvc	#0
 	mov	x0, #BOOT_CPU_MODE_EL2
 	ret
@@ -634,7 +634,7 @@  SYM_FUNC_START_LOCAL(secondary_startup)
 	/*
 	 * Common entry point for secondary CPUs.
 	 */
-	bl	switch_to_vhe
+	bl	finalise_el2
 	bl	__cpu_secondary_check52bitva
 	bl	__cpu_setup			// initialise processor
 	adrp	x1, swapper_pg_dir
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 43d212618834..26206e53e0c9 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -51,8 +51,8 @@  SYM_CODE_START_LOCAL(elx_sync)
 	msr	vbar_el2, x1
 	b	9f
 
-1:	cmp	x0, #HVC_VHE_RESTART
-	b.eq	mutate_to_vhe
+1:	cmp	x0, #HVC_FINALISE_EL2
+	b.eq	__finalise_el2
 
 2:	cmp	x0, #HVC_SOFT_RESTART
 	b.ne	3f
@@ -73,8 +73,8 @@  SYM_CODE_START_LOCAL(elx_sync)
 	eret
 SYM_CODE_END(elx_sync)
 
-// nVHE? No way! Give me the real thing!
-SYM_CODE_START_LOCAL(mutate_to_vhe)
+SYM_CODE_START_LOCAL(__finalise_el2)
+	// nVHE? No way! Give me the real thing!
 	// Sanity check: MMU *must* be off
 	mrs	x1, sctlr_el2
 	tbnz	x1, #0, 1f
@@ -140,10 +140,10 @@  SYM_CODE_START_LOCAL(mutate_to_vhe)
 	msr	spsr_el1, x0
 
 	b	enter_vhe
-SYM_CODE_END(mutate_to_vhe)
+SYM_CODE_END(__finalise_el2)
 
 	// At the point where we reach enter_vhe(), we run with
-	// the MMU off (which is enforced by mutate_to_vhe()).
+	// the MMU off (which is enforced by __finalise_el2()).
 	// We thus need to be in the idmap, or everything will
 	// explode when enabling the MMU.
 
@@ -222,9 +222,9 @@  SYM_FUNC_START(__hyp_reset_vectors)
 SYM_FUNC_END(__hyp_reset_vectors)
 
 /*
- * Entry point to switch to VHE if deemed capable
+ * Entry point to finalise EL2 and switch to VHE if deemed capable
  */
-SYM_FUNC_START(switch_to_vhe)
+SYM_FUNC_START(finalise_el2)
 	// Need to have booted at EL2
 	adr_l	x1, __boot_cpu_mode
 	ldr	w0, [x1]
@@ -236,9 +236,8 @@  SYM_FUNC_START(switch_to_vhe)
 	cmp	x0, #CurrentEL_EL1
 	b.ne	1f
 
-	// Turn the world upside down
-	mov	x0, #HVC_VHE_RESTART
+	mov	x0, #HVC_FINALISE_EL2
 	hvc	#0
 1:
 	ret
-SYM_FUNC_END(switch_to_vhe)
+SYM_FUNC_END(finalise_el2)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 4ea9392f86e0..c7e01ee0bda8 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -100,7 +100,7 @@  SYM_FUNC_END(__cpu_suspend_enter)
 	.pushsection ".idmap.text", "awx"
 SYM_CODE_START(cpu_resume)
 	bl	init_kernel_el
-	bl	switch_to_vhe
+	bl	finalise_el2
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir