
[v1,2/2] KVM: SVM: Implement reserved bit callback to set MMIO SPTE mask

Message ID 519cd0f0f2ff7fa0e097967546506c07e2e56dda.1576698347.git.thomas.lendacky@amd.com (mailing list archive)
State New, archived
Series MMIO mask fix for AMD memory encryption support

Commit Message

Tom Lendacky Dec. 18, 2019, 7:45 p.m. UTC
Register a reserved bit(s) mask callback that will check whether memory
encryption is supported and enabled:
  If enabled, the physical address width is reduced and the first bit
  above the new, reduced physical address limit will always be reserved.

  If disabled, the physical address width is not reduced, so bit 51 can
  be used as the reserved bit, unless the physical address width is 52,
  in which case there are no reserved bits and zero is returned for the
  mask (see the sketch after the diffstat below).

Fixes: 28a1f3ac1d0c ("kvm: x86: Set highest physical address bits in non-present/reserved SPTEs")
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/kvm/svm.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
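
The mask selection described in the commit message can be illustrated outside
of KVM. Below is a minimal, user-space C sketch of the same three-way decision;
the function name, parameters, and example inputs are hypothetical and not part
of the patch, which reads the real values from CPUID, MSR_K8_SYSCFG, and
boot_cpu_data instead.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for svm_get_reserved_mask(): compute the reserved-bit
 * mask from pre-gathered inputs instead of reading CPUID, MSR_K8_SYSCFG and
 * boot_cpu_data as the kernel code does.
 */
static uint64_t reserved_mask_sketch(bool sme_supported, bool mem_encrypt_enabled,
				     unsigned int phys_bits)
{
	/* Default: bit 51, used when memory encryption is not enabled. */
	uint64_t mask = 1ULL << 51;

	/* No support for memory encryption, use the default. */
	if (!sme_supported)
		return mask;

	if (mem_encrypt_enabled) {
		/*
		 * Memory encryption reduces the physical address width; the
		 * first bit above the reduced limit is always reserved.
		 * phys_bits is assumed to already reflect the reduction, as
		 * boot_cpu_data.x86_phys_bits does in the kernel.
		 */
		return 1ULL << phys_bits;
	}

	/* No reduction: bit 51 is reserved unless the width is already 52. */
	if (phys_bits == 52)
		mask = 0;

	return mask;
}

int main(void)
{
	/* Example inputs only; real values come from CPUID/MSRs. */
	printf("no SME:           %#llx\n",
	       (unsigned long long)reserved_mask_sketch(false, false, 48));
	printf("SME on, 43 bits:  %#llx\n",
	       (unsigned long long)reserved_mask_sketch(true, true, 43));
	printf("SME off, 52 bits: %#llx\n",
	       (unsigned long long)reserved_mask_sketch(true, false, 52));
	return 0;
}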

Patch

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 122d4ce3b1ab..a769aab45841 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -7242,6 +7242,46 @@  static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
 		   (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
 }
 
+static u64 svm_get_reserved_mask(void)
+{
+	u64 mask, msr;
+
+	/* The default mask, used when memory encryption is not enabled */
+	mask = 1ull << 51;
+
+	/* No support for memory encryption, use the default */
+	if (cpuid_eax(0x80000000) < 0x8000001f)
+		return mask;
+
+	/*
+	 * Check for memory encryption support. If memory encryption support
+	 * is enabled:
+	 *   The physical addressing width is reduced. The first bit above the
+	 *   new physical addressing limit will always be reserved.
+	 */
+	rdmsrl(MSR_K8_SYSCFG, msr);
+	if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
+		/*
+		 * x86_phys_bits has been adjusted as part of the memory
+		 * encryption support.
+		 */
+		mask = 1ull << boot_cpu_data.x86_phys_bits;
+
+		return mask;
+	}
+
+	/*
+	 * If memory encryption support is disabled:
+	 *   The physical addressing width is not reduced, so the default mask
+	 *   will always be reserved unless the physical addressing width is 52,
+	 *   in which case there are no reserved bits, so return an empty mask.
+	 */
+	if (IS_ENABLED(CONFIG_X86_64) && boot_cpu_data.x86_phys_bits == 52)
+		mask = 0;
+
+	return mask;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -7379,6 +7419,8 @@  static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 
 	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
+
+	.get_reserved_mask = svm_get_reserved_mask,
 };
 
 static int __init svm_init(void)
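
For reference, the reason boot_cpu_data.x86_phys_bits can be used directly in
svm_get_reserved_mask() is that CPUID leaf 0x8000001F reports both memory
encryption support (EAX bit 0) and the physical-address-bit reduction that
applies when it is enabled (EBX bits 11:6), and the kernel subtracts that
reduction from x86_phys_bits during early CPU setup when memory encryption is
active. A small user-space sketch (not part of the patch) that reads those
fields on an AMD host:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Check that the 0x8000001F extended leaf exists at all. */
	if (__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) == 0 ||
	    eax < 0x8000001f) {
		printf("no memory encryption leaf\n");
		return 0;
	}

	__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);

	/* EAX bit 0: SME supported; EBX[11:6]: physical address bit reduction. */
	printf("SME supported: %u\n", eax & 1);
	printf("phys addr bit reduction: %u\n", (ebx >> 6) & 0x3f);
	return 0;
}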