
[v2,21/24] kvm: arm64: Add kvm-arm.protected early kernel parameter

Message ID 20201116204318.63987-22-dbrazdil@google.com (mailing list archive)
State New, archived
Series Opt-in always-on nVHE hypervisor

Commit Message

David Brazdil Nov. 16, 2020, 8:43 p.m. UTC
Add an early parameter that allows users to opt into protected KVM mode
when using the nVHE hypervisor. In this mode, guest state will be kept
private from the host. This will primarily involve enabling stage-2
address translation for the host, restricting DMA to host memory, and
filtering host SMCs.

Capability ARM64_PROTECTED_KVM is set if the param is passed, CONFIG_KVM
is enabled and the kernel was not booted with VHE.

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/include/asm/cpucaps.h |  3 ++-
 arch/arm64/include/asm/virt.h    |  8 ++++++++
 arch/arm64/kernel/cpufeature.c   | 29 +++++++++++++++++++++++++++++
 arch/arm64/kvm/arm.c             | 10 +++++++++-
 4 files changed, 48 insertions(+), 2 deletions(-)
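
With this patch applied, protected mode would be requested from the kernel
command line roughly as follows (an illustrative example; the parameter is
parsed with strtobool(), so the usual boolean spellings are accepted):

    kvm-arm.protected=1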

Comments

Marc Zyngier Nov. 23, 2020, 5:30 p.m. UTC | #1
On Mon, 16 Nov 2020 20:43:15 +0000,
David Brazdil <dbrazdil@google.com> wrote:
> 
> Add an early parameter that allows users to opt into protected KVM mode
> when using the nVHE hypervisor. In this mode, guest state will be kept
> private from the host. This will primarily involve enabling stage-2
> address translation for the host, restricting DMA to host memory, and
> filtering host SMCs.
> 
> Capability ARM64_PROTECTED_KVM is set if the param is passed, CONFIG_KVM
> is enabled and the kernel was not booted with VHE.
> 
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/include/asm/cpucaps.h |  3 ++-
>  arch/arm64/include/asm/virt.h    |  8 ++++++++
>  arch/arm64/kernel/cpufeature.c   | 29 +++++++++++++++++++++++++++++
>  arch/arm64/kvm/arm.c             | 10 +++++++++-
>  4 files changed, 48 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> index e7d98997c09c..ac075f70b2e4 100644
> --- a/arch/arm64/include/asm/cpucaps.h
> +++ b/arch/arm64/include/asm/cpucaps.h
> @@ -66,7 +66,8 @@
>  #define ARM64_HAS_TLB_RANGE			56
>  #define ARM64_MTE				57
>  #define ARM64_WORKAROUND_1508412		58
> +#define ARM64_PROTECTED_KVM			59
>  
> -#define ARM64_NCAPS				59
> +#define ARM64_NCAPS				60
>  
>  #endif /* __ASM_CPUCAPS_H */
> diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
> index 6069be50baf9..2fde1186b962 100644
> --- a/arch/arm64/include/asm/virt.h
> +++ b/arch/arm64/include/asm/virt.h
> @@ -97,6 +97,14 @@ static __always_inline bool has_vhe(void)
>  		return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
>  }
>  
> +static __always_inline bool is_protected_kvm_enabled(void)
> +{
> +	if (is_vhe_hyp_code())
> +		return false;
> +	else
> +		return cpus_have_final_cap(ARM64_PROTECTED_KVM);
> +}
> +
>  #endif /* __ASSEMBLY__ */
>  
>  #endif /* ! __ASM__VIRT_H */
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 6f36c4f62f69..dd5bc0f0cf0d 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -1709,6 +1709,29 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
>  }
>  #endif /* CONFIG_ARM64_MTE */
>  
> +#ifdef CONFIG_KVM
> +static bool enable_protected_kvm;
> +
> +static bool has_protected_kvm(const struct arm64_cpu_capabilities *entry, int __unused)
> +{
> +	if (!enable_protected_kvm)
> +		return false;
> +
> +	if (is_kernel_in_hyp_mode()) {
> +		pr_warn("Protected KVM not available with VHE\n");
> +		return false;
> +	}
> +
> +	return true;
> +}
> +
> +static int __init early_protected_kvm_cfg(char *buf)
> +{
> +	return strtobool(buf, &enable_protected_kvm);
> +}
> +early_param("kvm-arm.protected", early_protected_kvm_cfg);

Please add some documentation to
Documentation/admin-guide/kernel-parameters.txt.
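
Something along these lines would probably do (only a rough sketch of what
such an entry could look like):

	kvm-arm.protected=
			[KVM,ARM] Allow KVM to run in the protected nVHE
			mode, in which guest state is kept private from
			the host kernel. Ignored when the kernel is
			booted in VHE mode.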

> +#endif /* CONFIG_KVM */
> +
>  /* Internal helper functions to match cpu capability type */
>  static bool
>  cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
> @@ -1822,6 +1845,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>  		.field_pos = ID_AA64PFR0_EL1_SHIFT,
>  		.min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT,
>  	},
> +	{
> +		.desc = "Protected KVM",
> +		.capability = ARM64_PROTECTED_KVM,
> +		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
> +		.matches = has_protected_kvm,
> +	},
>  #endif
>  	{
>  		.desc = "Kernel page table isolation (KPTI)",
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index c76a8e5bd19c..49d2474f2a80 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -1796,6 +1796,12 @@ int kvm_arch_init(void *opaque)
>  		return -ENODEV;
>  	}
>  
> +	/* The PROTECTED_KVM cap should not have been enabled for VHE. */
> +	if (in_hyp_mode && is_protected_kvm_enabled()) {
> +		kvm_pr_unimpl("VHE protected mode unsupported, not initializing\n");
> +		return -ENODEV;

How can this happen? Don't we already take care of this?

> +	}
> +
>  	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
>  	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
>  		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
> @@ -1827,7 +1833,9 @@ int kvm_arch_init(void *opaque)
>  	if (err)
>  		goto out_hyp;
>  
> -	if (in_hyp_mode)
> +	if (is_protected_kvm_enabled())
> +		kvm_info("Protected nVHE mode initialized successfully\n");
> +	else if (in_hyp_mode)
>  		kvm_info("VHE mode initialized successfully\n");
>  	else
>  		kvm_info("Hyp mode initialized successfully\n");
> -- 
> 2.29.2.299.gdc1121823c-goog
> 
> 

Thanks,

	M.

Patch

diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index e7d98997c09c..ac075f70b2e4 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,7 +66,8 @@ 
 #define ARM64_HAS_TLB_RANGE			56
 #define ARM64_MTE				57
 #define ARM64_WORKAROUND_1508412		58
+#define ARM64_PROTECTED_KVM			59
 
-#define ARM64_NCAPS				59
+#define ARM64_NCAPS				60
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 6069be50baf9..2fde1186b962 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -97,6 +97,14 @@ static __always_inline bool has_vhe(void)
 		return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
 }
 
+static __always_inline bool is_protected_kvm_enabled(void)
+{
+	if (is_vhe_hyp_code())
+		return false;
+	else
+		return cpus_have_final_cap(ARM64_PROTECTED_KVM);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6f36c4f62f69..dd5bc0f0cf0d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1709,6 +1709,29 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_MTE */
 
+#ifdef CONFIG_KVM
+static bool enable_protected_kvm;
+
+static bool has_protected_kvm(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+	if (!enable_protected_kvm)
+		return false;
+
+	if (is_kernel_in_hyp_mode()) {
+		pr_warn("Protected KVM not available with VHE\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int __init early_protected_kvm_cfg(char *buf)
+{
+	return strtobool(buf, &enable_protected_kvm);
+}
+early_param("kvm-arm.protected", early_protected_kvm_cfg);
+#endif /* CONFIG_KVM */
+
 /* Internal helper functions to match cpu capability type */
 static bool
 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@@ -1822,6 +1845,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64PFR0_EL1_SHIFT,
 		.min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT,
 	},
+	{
+		.desc = "Protected KVM",
+		.capability = ARM64_PROTECTED_KVM,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_protected_kvm,
+	},
 #endif
 	{
 		.desc = "Kernel page table isolation (KPTI)",
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c76a8e5bd19c..49d2474f2a80 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1796,6 +1796,12 @@ int kvm_arch_init(void *opaque)
 		return -ENODEV;
 	}
 
+	/* The PROTECTED_KVM cap should not have been enabled for VHE. */
+	if (in_hyp_mode && is_protected_kvm_enabled()) {
+		kvm_pr_unimpl("VHE protected mode unsupported, not initializing\n");
+		return -ENODEV;
+	}
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
 	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
 		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
@@ -1827,7 +1833,9 @@ int kvm_arch_init(void *opaque)
 	if (err)
 		goto out_hyp;
 
-	if (in_hyp_mode)
+	if (is_protected_kvm_enabled())
+		kvm_info("Protected nVHE mode initialized successfully\n");
+	else if (in_hyp_mode)
 		kvm_info("VHE mode initialized successfully\n");
 	else
 		kvm_info("Hyp mode initialized successfully\n");