diff mbox series

KVM: arm64: Fix nVHE stacktrace VA bits mask

Message ID 20250106183213.4094616-1-vdonnefort@google.com (mailing list archive)
State New
Headers show
Series KVM: arm64: Fix nVHE stacktrace VA bits mask | expand

Commit Message

Vincent Donnefort Jan. 6, 2025, 6:32 p.m. UTC
The hypervisor VA space size depends on both the ID map's
(IDMAP_VA_BITS) and the kernel stage-1 (VA_BITS). When VA_BITS is
smaller than IDMAP_VA_BITS (i.e. 39-bit), the stacktrace can contain
addresses bigger than the current VA_BITS mask.

As the hyp_va_bits value needs to be used outside of the init code now,
use a global variable, shared by all the kvm users in mmu.c, arm.c and
now stacktrace.c.

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>


base-commit: 13563da6ffcf49b8b45772e40b35f96926a7ee1e

Comments

Marc Zyngier Jan. 7, 2025, 9:31 a.m. UTC | #1
Vincent,

Please add all the KVM/arm64 reviewers in the future (I added them
this time).

On Mon, 06 Jan 2025 18:32:13 +0000,
Vincent Donnefort <vdonnefort@google.com> wrote:
> 
> The hypervisor VA space size depends on both the ID map's
> (IDMAP_VA_BITS) and the kernel stage-1 (VA_BITS). When VA_BITS is
> smaller than IDMAP_VA_BITS (i.e. 39-bit), the stacktrace can contain
> addresses bigger than the current VA_BITS mask.
> 
> As the hyp_va_bits value needs to be used outside of the init code now,
> use a global variable, shared by all the kvm users in mmu.c, arm.c and
> now stacktrace.c.

I tend to dislike this approach for at least three reasons:

- it makes it hard to follow *when* hyp_va_bits is made valid, while
  passing the value as a parameter is self-explanatory. Especially
  given how convoluted the nVHE/pKVM init is these days.

- it prevents the eventual use of *multiple* VA bit values (one for
  TTBR0, one for TTBR1) once the grand plan for hVHE is completed
  (right after full NV support is merged! ;-)

- it makes the change larger than it should be, especially for
  something that should be backported.

So I'd rather you keep the general shape of the code as it is, and simply
publish this 'hyp_va_bits' for the purpose of the backtrace code.

Thanks,

	M.
Vincent Donnefort Jan. 7, 2025, 10:34 a.m. UTC | #2
On Tue, Jan 07, 2025 at 09:31:16AM +0000, Marc Zyngier wrote:
> Vincent,
> 
> Please add all the KVM/arm64 reviewers in the future (I added them
> this time).

Ack, apologies for the missing recipients.

> 
> On Mon, 06 Jan 2025 18:32:13 +0000,
> Vincent Donnefort <vdonnefort@google.com> wrote:
> > 
> > The hypervisor VA space size depends on both the ID map's
> > (IDMAP_VA_BITS) and the kernel stage-1 (VA_BITS). When VA_BITS is
> > smaller than IDMAP_VA_BITS (i.e. 39-bit), the stacktrace can contain
> > addresses bigger than the current VA_BITS mask.
> > 
> > As the hyp_va_bits value needs to be used outside of the init code now,
> > use a global variable, shared by all the kvm users in mmu.c, arm.c and
> > now stacktrace.c.
> 
> I tend to dislike this approach for at least three reasons:
> 
> - it makes it hard to follow *when* hyp_va_bits is made valid, while
>   passing the value as a parameter is self-explanatory. Especially
>   given how convoluted the nVHE/pKVM init is these days.
> 
> - it prevents the eventual use of *multiple* VA bit values (one for
>   TTBR0, one for TTBR1) once the grand plan for hVHE is completed
>   (right after full NV support is merged! ;-)
> 
> - it makes the change larger than it should be, especially for
>   something that should be backported.

I was myself not completely convinced because of that last point. I'll send a v2
with a shorter version.

Thanks for your swift review! 

> 
> So I'd rather you keep the general shape of the code as it is, and simply
> publish this 'hyp_va_bits' for the purpose of the backtrace code.
> 
> Thanks,
> 
> 	M.
> 
> -- 
> Without deviation from the norm, progress is not possible.
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 66d93e320ec8..8195a77056a9 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -139,6 +139,8 @@  static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+extern u32 hyp_va_bits;
+
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
@@ -182,7 +184,7 @@  int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
-int __init kvm_mmu_init(u32 *hyp_va_bits);
+int __init kvm_mmu_init(void);
 
 static inline void *__kvm_vector_slot2addr(void *base,
 					   enum arm64_hyp_spectre_vector slot)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a102c3aebdbc..90d28c35c5b5 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1987,7 +1987,7 @@  static int kvm_init_vector_slots(void)
 	return 0;
 }
 
-static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
+static void __init cpu_prepare_hyp_mode(int cpu)
 {
 	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
 	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
@@ -2351,7 +2351,7 @@  static void __init teardown_hyp_mode(void)
 	}
 }
 
-static int __init do_pkvm_init(u32 hyp_va_bits)
+static int __init do_pkvm_init(void)
 {
 	void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
 	int ret;
@@ -2412,7 +2412,7 @@  static void kvm_hyp_init_symbols(void)
 	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
 }
 
-static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
+static int __init kvm_hyp_init_protection(void)
 {
 	void *addr = phys_to_virt(hyp_mem_base);
 	int ret;
@@ -2421,7 +2421,7 @@  static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
 	if (ret)
 		return ret;
 
-	ret = do_pkvm_init(hyp_va_bits);
+	ret = do_pkvm_init();
 	if (ret)
 		return ret;
 
@@ -2505,7 +2505,6 @@  static void pkvm_hyp_init_ptrauth(void)
 /* Inits Hyp-mode on all online CPUs */
 static int __init init_hyp_mode(void)
 {
-	u32 hyp_va_bits;
 	int cpu;
 	int err = -ENOMEM;
 
@@ -2519,7 +2518,7 @@  static int __init init_hyp_mode(void)
 	/*
 	 * Allocate Hyp PGD and setup Hyp identity mapping
 	 */
-	err = kvm_mmu_init(&hyp_va_bits);
+	err = kvm_mmu_init();
 	if (err)
 		goto out_err;
 
@@ -2633,7 +2632,7 @@  static int __init init_hyp_mode(void)
 		}
 
 		/* Prepare the CPU initialization parameters */
-		cpu_prepare_hyp_mode(cpu, hyp_va_bits);
+		cpu_prepare_hyp_mode(cpu);
 	}
 
 	kvm_hyp_init_symbols();
@@ -2654,7 +2653,7 @@  static int __init init_hyp_mode(void)
 		if (err)
 			goto out_err;
 
-		err = kvm_hyp_init_protection(hyp_va_bits);
+		err = kvm_hyp_init_protection();
 		if (err) {
 			kvm_err("Failed to init hyp memory protection\n");
 			goto out_err;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c9d46ad57e52..62a99c86cd1d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -29,6 +29,8 @@  static unsigned long __ro_after_init hyp_idmap_start;
 static unsigned long __ro_after_init hyp_idmap_end;
 static phys_addr_t __ro_after_init hyp_idmap_vector;
 
+u32 __ro_after_init hyp_va_bits;
+
 static unsigned long __ro_after_init io_map_base;
 
 static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
@@ -1986,7 +1988,7 @@  static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
 	.virt_to_phys		= kvm_host_pa,
 };
 
-int __init kvm_mmu_init(u32 *hyp_va_bits)
+int __init kvm_mmu_init(void)
 {
 	int err;
 	u32 idmap_bits;
@@ -2020,9 +2022,9 @@  int __init kvm_mmu_init(u32 *hyp_va_bits)
 	 */
 	idmap_bits = IDMAP_VA_BITS;
 	kernel_bits = vabits_actual;
-	*hyp_va_bits = max(idmap_bits, kernel_bits);
+	hyp_va_bits = max(idmap_bits, kernel_bits);
 
-	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
+	kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
 	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
 	kvm_debug("HYP VA range: %lx:%lx\n",
 		  kern_hyp_va(PAGE_OFFSET),
@@ -2047,7 +2049,7 @@  int __init kvm_mmu_init(u32 *hyp_va_bits)
 		goto out;
 	}
 
-	err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
+	err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits, &kvm_hyp_mm_ops);
 	if (err)
 		goto out_free_pgtable;
 
diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c
index 3ace5b75813b..ef7a22598d89 100644
--- a/arch/arm64/kvm/stacktrace.c
+++ b/arch/arm64/kvm/stacktrace.c
@@ -19,6 +19,7 @@ 
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
+#include <asm/kvm_mmu.h>
 #include <asm/stacktrace/nvhe.h>
 
 static struct stack_info stackinfo_get_overflow(void)
@@ -145,7 +146,7 @@  static void unwind(struct unwind_state *state,
  */
 static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
 {
-	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+	unsigned long va_mask = GENMASK_ULL(hyp_va_bits - 1, 0);
 	unsigned long hyp_offset = (unsigned long)arg;
 
 	/* Mask tags and convert to kern addr */