@@ -139,6 +139,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
+extern u32 hyp_va_bits;
+
/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
@@ -182,7 +184,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
-int __init kvm_mmu_init(u32 *hyp_va_bits);
+int __init kvm_mmu_init(void);
static inline void *__kvm_vector_slot2addr(void *base,
enum arm64_hyp_spectre_vector slot)
@@ -1987,7 +1987,7 @@ static int kvm_init_vector_slots(void)
return 0;
}
-static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
+static void __init cpu_prepare_hyp_mode(int cpu)
{
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
@@ -2351,7 +2351,7 @@ static void __init teardown_hyp_mode(void)
}
}
-static int __init do_pkvm_init(u32 hyp_va_bits)
+static int __init do_pkvm_init(void)
{
void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
int ret;
@@ -2412,7 +2412,7 @@ static void kvm_hyp_init_symbols(void)
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
}
-static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
+static int __init kvm_hyp_init_protection(void)
{
void *addr = phys_to_virt(hyp_mem_base);
int ret;
@@ -2421,7 +2421,7 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
if (ret)
return ret;
- ret = do_pkvm_init(hyp_va_bits);
+ ret = do_pkvm_init();
if (ret)
return ret;
@@ -2505,7 +2505,6 @@ static void pkvm_hyp_init_ptrauth(void)
/* Inits Hyp-mode on all online CPUs */
static int __init init_hyp_mode(void)
{
- u32 hyp_va_bits;
int cpu;
int err = -ENOMEM;
@@ -2519,7 +2518,7 @@ static int __init init_hyp_mode(void)
/*
* Allocate Hyp PGD and setup Hyp identity mapping
*/
- err = kvm_mmu_init(&hyp_va_bits);
+ err = kvm_mmu_init();
if (err)
goto out_err;
@@ -2633,7 +2632,7 @@ static int __init init_hyp_mode(void)
}
/* Prepare the CPU initialization parameters */
- cpu_prepare_hyp_mode(cpu, hyp_va_bits);
+ cpu_prepare_hyp_mode(cpu);
}
kvm_hyp_init_symbols();
@@ -2654,7 +2653,7 @@ static int __init init_hyp_mode(void)
if (err)
goto out_err;
- err = kvm_hyp_init_protection(hyp_va_bits);
+ err = kvm_hyp_init_protection();
if (err) {
kvm_err("Failed to init hyp memory protection\n");
goto out_err;
@@ -29,6 +29,8 @@ static unsigned long __ro_after_init hyp_idmap_start;
static unsigned long __ro_after_init hyp_idmap_end;
static phys_addr_t __ro_after_init hyp_idmap_vector;
+u32 __ro_after_init hyp_va_bits;
+
static unsigned long __ro_after_init io_map_base;
static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
@@ -1986,7 +1988,7 @@ static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
.virt_to_phys = kvm_host_pa,
};
-int __init kvm_mmu_init(u32 *hyp_va_bits)
+int __init kvm_mmu_init(void)
{
int err;
u32 idmap_bits;
@@ -2020,9 +2022,9 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
*/
idmap_bits = IDMAP_VA_BITS;
kernel_bits = vabits_actual;
- *hyp_va_bits = max(idmap_bits, kernel_bits);
+ hyp_va_bits = max(idmap_bits, kernel_bits);
- kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
+ kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
kvm_debug("HYP VA range: %lx:%lx\n",
kern_hyp_va(PAGE_OFFSET),
@@ -2047,7 +2049,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
goto out;
}
- err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
+ err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits, &kvm_hyp_mm_ops);
if (err)
goto out_free_pgtable;
@@ -19,6 +19,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
#include <asm/stacktrace/nvhe.h>
static struct stack_info stackinfo_get_overflow(void)
@@ -145,7 +146,7 @@ static void unwind(struct unwind_state *state,
*/
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
- unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+ unsigned long va_mask = GENMASK_ULL(hyp_va_bits - 1, 0);
unsigned long hyp_offset = (unsigned long)arg;
/* Mask tags and convert to kern addr */
The hypervisor VA space size depends on both the ID map's size (IDMAP_VA_BITS) and the kernel stage-1's size (VA_BITS). When VA_BITS is smaller than IDMAP_VA_BITS (e.g. a 39-bit kernel VA space), the stacktrace can contain addresses bigger than the current VA_BITS mask. As the hyp_va_bits value now needs to be used outside of the init code, turn it into a global variable shared by all the KVM users in mmu.c, arm.c and now stacktrace.c.

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>

base-commit: 13563da6ffcf49b8b45772e40b35f96926a7ee1e