Message ID | 20230526143348.4072074-5-maz@kernel.org (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: arm64: Allow using VHE in the nVHE hypervisor | expand |
Hey Marc, I'm an idiot and was responding to v1. Here's the same damn comment, but on v2! On Fri, May 26, 2023 at 03:33:35PM +0100, Marc Zyngier wrote: > Expose a capability keying the hVHE feature as well as a new > predicate testing it. Nothing is so far using it, and nothing > is enabling it yet. > > Signed-off-by: Marc Zyngier <maz@kernel.org> > --- > arch/arm64/include/asm/cpufeature.h | 1 + > arch/arm64/include/asm/virt.h | 8 ++++++++ > arch/arm64/kernel/cpufeature.c | 15 +++++++++++++++ > arch/arm64/tools/cpucaps | 1 + > 4 files changed, 25 insertions(+) > > diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h > index bc1009890180..3d4b547ae312 100644 > --- a/arch/arm64/include/asm/cpufeature.h > +++ b/arch/arm64/include/asm/cpufeature.h > @@ -16,6 +16,7 @@ > #define cpu_feature(x) KERNEL_HWCAP_ ## x > > #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR 0 > +#define ARM64_SW_FEATURE_OVERRIDE_HVHE 4 > > #ifndef __ASSEMBLY__ > > diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h > index 91029709d133..5f84a87a6a2d 100644 > --- a/arch/arm64/include/asm/virt.h > +++ b/arch/arm64/include/asm/virt.h > @@ -145,6 +145,14 @@ static __always_inline bool is_protected_kvm_enabled(void) > return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE); > } > > +static __always_inline bool has_hvhe(void) > +{ > + if (is_vhe_hyp_code()) > + return false; > + > + return cpus_have_final_cap(ARM64_KVM_HVHE); > +} > + > static inline bool is_hyp_nvhe(void) > { > return is_hyp_mode_available() && !is_kernel_in_hyp_mode(); > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c > index 2d2b7bb5fa0c..04ef60571b37 100644 > --- a/arch/arm64/kernel/cpufeature.c > +++ b/arch/arm64/kernel/cpufeature.c > @@ -1998,6 +1998,15 @@ static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap, > return true; > } > > +static bool hvhe_possible(const struct arm64_cpu_capabilities *entry, > + int __unused) > +{ > + u64 
val; > + > + val = arm64_sw_feature_override.val & arm64_sw_feature_override.mask; > + return cpuid_feature_extract_unsigned_field(val, ARM64_SW_FEATURE_OVERRIDE_HVHE); > +} Does this need to test ID_AA64MMFR1_EL1.VH as well? Otherwise I don't see what would stop us from attempting hVHE on a system with asymmetric support for VHE, as the software override was only evaluated on the boot CPU. > #ifdef CONFIG_ARM64_PAN > static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) > { > @@ -2643,6 +2652,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = { > .cpu_enable = cpu_enable_dit, > ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP) > }, > + { > + .desc = "VHE for hypervisor only", > + .capability = ARM64_KVM_HVHE, > + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, > + .matches = hvhe_possible, > + }, > {}, > }; > > diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps > index 40ba95472594..3c23a55d7c2f 100644 > --- a/arch/arm64/tools/cpucaps > +++ b/arch/arm64/tools/cpucaps > @@ -47,6 +47,7 @@ HAS_TLB_RANGE > HAS_VIRT_HOST_EXTN > HAS_WFXT > HW_DBM > +KVM_HVHE > KVM_PROTECTED_MODE > MISMATCHED_CACHE_TYPE > MTE > -- > 2.34.1 >
Hi Oliver, On Thu, 01 Jun 2023 08:06:24 +0100, Oliver Upton <oliver.upton@linux.dev> wrote: > > Hey Marc, > > I'm an idiot and was responding to v1. Here's the same damn comment, but > on v2! Probably means that I'm even more of an idiot by sending the same buggy code twice! :D > > On Fri, May 26, 2023 at 03:33:35PM +0100, Marc Zyngier wrote: > > Expose a capability keying the hVHE feature as well as a new > > predicate testing it. Nothing is so far using it, and nothing > > is enabling it yet. > > > > Signed-off-by: Marc Zyngier <maz@kernel.org> > > --- > > arch/arm64/include/asm/cpufeature.h | 1 + > > arch/arm64/include/asm/virt.h | 8 ++++++++ > > arch/arm64/kernel/cpufeature.c | 15 +++++++++++++++ > > arch/arm64/tools/cpucaps | 1 + > > 4 files changed, 25 insertions(+) > > > > diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h > > index bc1009890180..3d4b547ae312 100644 > > --- a/arch/arm64/include/asm/cpufeature.h > > +++ b/arch/arm64/include/asm/cpufeature.h > > @@ -16,6 +16,7 @@ > > #define cpu_feature(x) KERNEL_HWCAP_ ## x > > > > #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR 0 > > +#define ARM64_SW_FEATURE_OVERRIDE_HVHE 4 > > > > #ifndef __ASSEMBLY__ > > > > diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h > > index 91029709d133..5f84a87a6a2d 100644 > > --- a/arch/arm64/include/asm/virt.h > > +++ b/arch/arm64/include/asm/virt.h > > @@ -145,6 +145,14 @@ static __always_inline bool is_protected_kvm_enabled(void) > > return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE); > > } > > > > +static __always_inline bool has_hvhe(void) > > +{ > > + if (is_vhe_hyp_code()) > > + return false; > > + > > + return cpus_have_final_cap(ARM64_KVM_HVHE); > > +} > > + > > static inline bool is_hyp_nvhe(void) > > { > > return is_hyp_mode_available() && !is_kernel_in_hyp_mode(); > > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c > > index 2d2b7bb5fa0c..04ef60571b37 100644 > > --- 
a/arch/arm64/kernel/cpufeature.c > > +++ b/arch/arm64/kernel/cpufeature.c > > @@ -1998,6 +1998,15 @@ static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap, > > return true; > > } > > > > +static bool hvhe_possible(const struct arm64_cpu_capabilities *entry, > > + int __unused) > > +{ > > + u64 val; > > + > > + val = arm64_sw_feature_override.val & arm64_sw_feature_override.mask; > > + return cpuid_feature_extract_unsigned_field(val, ARM64_SW_FEATURE_OVERRIDE_HVHE); > > +} > > Does this need to test ID_AA64MMFR1_EL1.VH as well? Otherwise I don't > see what would stop us from attempting hVHE on a system with asymmetric > support for VHE, as the software override was only evaluated on the boot > CPU. Huh. You obviously have a filthy mind. Yeah, we could also test for the sanitised view of MMFR1.VH and change our mind at the last minute. I'll add a check. It also probably means that I need to make this a "ARM64_CPUCAP_SYSTEM_FEATURE" instead of a "ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE" (I think...). But it has to be said that such a system, even without my hacks, would badly explode if the boot CPU was VHE capable and a secondary wasn't. The boot logic would keep one CPU at EL2 and move the secondary to EL1, and things would seemingly work until you try to do things like TLB invalidation (and you probably wouldn't even get a timer interrupt...). Thanks, M.
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index bc1009890180..3d4b547ae312 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -16,6 +16,7 @@ #define cpu_feature(x) KERNEL_HWCAP_ ## x #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR 0 +#define ARM64_SW_FEATURE_OVERRIDE_HVHE 4 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 91029709d133..5f84a87a6a2d 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -145,6 +145,14 @@ static __always_inline bool is_protected_kvm_enabled(void) return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE); } +static __always_inline bool has_hvhe(void) +{ + if (is_vhe_hyp_code()) + return false; + + return cpus_have_final_cap(ARM64_KVM_HVHE); +} + static inline bool is_hyp_nvhe(void) { return is_hyp_mode_available() && !is_kernel_in_hyp_mode(); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2d2b7bb5fa0c..04ef60571b37 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1998,6 +1998,15 @@ static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap, return true; } +static bool hvhe_possible(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + u64 val; + + val = arm64_sw_feature_override.val & arm64_sw_feature_override.mask; + return cpuid_feature_extract_unsigned_field(val, ARM64_SW_FEATURE_OVERRIDE_HVHE); +} + #ifdef CONFIG_ARM64_PAN static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) { @@ -2643,6 +2652,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_enable_dit, ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP) }, + { + .desc = "VHE for hypervisor only", + .capability = ARM64_KVM_HVHE, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = hvhe_possible, + }, {}, }; diff --git a/arch/arm64/tools/cpucaps 
b/arch/arm64/tools/cpucaps index 40ba95472594..3c23a55d7c2f 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -47,6 +47,7 @@ HAS_TLB_RANGE HAS_VIRT_HOST_EXTN HAS_WFXT HW_DBM +KVM_HVHE KVM_PROTECTED_MODE MISMATCHED_CACHE_TYPE MTE
Expose a capability keying the hVHE feature as well as a new predicate testing it. Nothing is so far using it, and nothing is enabling it yet. Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/include/asm/cpufeature.h | 1 + arch/arm64/include/asm/virt.h | 8 ++++++++ arch/arm64/kernel/cpufeature.c | 15 +++++++++++++++ arch/arm64/tools/cpucaps | 1 + 4 files changed, 25 insertions(+)