
[RFC,7/8] KVM: x86: Add hooks in kvm_arch_vcpu_map_memory()

Message ID fa1b167cbb0473e90144315bfbdea1a7d187cae6.1709288671.git.isaku.yamahata@intel.com
State New, archived
Series KVM: Prepopulate guest memory API

Commit Message

Isaku Yamahata March 1, 2024, 5:28 p.m. UTC
From: Isaku Yamahata <isaku.yamahata@intel.com>

In the case of TDX, the initial memory contents need to be provided so
that they can be encrypted when populating guest memory before running
the guest.  Add hooks in kvm_arch_vcpu_map_memory() for KVM_MAP_MEMORY
before/after calling kvm_mmu_map_page().  TDX KVM will use the hooks.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
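As a rough illustration (not part of this patch), a TDX backend could
implement the hooks along these lines; the tdx_* names and the exact
source-page handling are hypothetical:

static int tdx_pre_mmu_map_page(struct kvm_vcpu *vcpu,
				struct kvm_memory_mapping *mapping,
				u32 *error_code, u8 *max_level)
{
	/* Initial guest memory must come with a source page to encrypt. */
	if (!mapping->source)
		return -EINVAL;

	/* Assume private pages are added at 4K granularity while populating. */
	*max_level = PG_LEVEL_4K;
	return 0;
}

static void tdx_post_mmu_map_page(struct kvm_vcpu *vcpu,
				  struct kvm_memory_mapping *mapping)
{
	/* Undo anything the pre hook set up, e.g. drop the source page ref. */
}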
 arch/x86/include/asm/kvm-x86-ops.h |  2 ++
 arch/x86/include/asm/kvm_host.h    |  6 ++++++
 arch/x86/kvm/x86.c                 | 34 ++++++++++++++++++++++++++++++
 3 files changed, 42 insertions(+)

Patch

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 3942b74c1b75..fc4e11d40733 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -137,6 +137,8 @@ KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
 KVM_X86_OP_OPTIONAL(get_untagged_addr)
+KVM_X86_OP_OPTIONAL(pre_mmu_map_page)
+KVM_X86_OP_OPTIONAL(post_mmu_map_page)
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9e7b1a00e265..301fedd6b156 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1805,6 +1805,12 @@ struct kvm_x86_ops {
 	unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
 
 	gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
+
+	int (*pre_mmu_map_page)(struct kvm_vcpu *vcpu,
+				struct kvm_memory_mapping *mapping,
+				u32 *error_code, u8 *max_level);
+	void (*post_mmu_map_page)(struct kvm_vcpu *vcpu,
+				  struct kvm_memory_mapping *mapping);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6025c0e12d89..ba8bf35f1c9a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5811,6 +5811,36 @@ int kvm_arch_vcpu_pre_map_memory(struct kvm_vcpu *vcpu)
 	return kvm_mmu_reload(vcpu);
 }
 
+static int kvm_pre_mmu_map_page(struct kvm_vcpu *vcpu,
+				struct kvm_memory_mapping *mapping,
+				u32 *error_code, u8 *max_level)
+{
+	int r = 0;
+
+	if (vcpu->kvm->arch.vm_type == KVM_X86_DEFAULT_VM ||
+	    vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM) {
+		if (mapping->source)
+			r = -EINVAL;
+	} else if (kvm_x86_ops.pre_mmu_map_page)
+		r = static_call(kvm_x86_pre_mmu_map_page)(vcpu, mapping,
+							  error_code,
+							  max_level);
+	else
+		r = -EOPNOTSUPP;
+
+	return r;
+}
+
+static void kvm_post_mmu_map_page(struct kvm_vcpu *vcpu, struct kvm_memory_mapping *mapping)
+{
+	if (vcpu->kvm->arch.vm_type == KVM_X86_DEFAULT_VM ||
+	    vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM)
+		return;
+
+	if (kvm_x86_ops.post_mmu_map_page)
+		static_call(kvm_x86_post_mmu_map_page)(vcpu, mapping);
+}
+
 int kvm_arch_vcpu_map_memory(struct kvm_vcpu *vcpu,
 			     struct kvm_memory_mapping *mapping)
 {
@@ -5842,8 +5872,12 @@ int kvm_arch_vcpu_map_memory(struct kvm_vcpu *vcpu,
 	else
 		max_level = PG_LEVEL_4K;
 
+	r = kvm_pre_mmu_map_page(vcpu, mapping, &error_code, &max_level);
+	if (r)
+		return r;
 	r = kvm_mmu_map_page(vcpu, gfn_to_gpa(mapping->base_gfn), error_code,
 			     max_level, &goal_level);
+	kvm_post_mmu_map_page(vcpu, mapping);
 	if (r)
 		return r;
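
For completeness, a vendor module would then publish the callbacks
through its kvm_x86_ops instance; the vt_x86_ops name below follows the
VMX/TDX convention but is only illustrative, assuming the sketch above:

static struct kvm_x86_ops vt_x86_ops __initdata = {
	/* ... existing callbacks elided ... */
	.pre_mmu_map_page = tdx_pre_mmu_map_page,
	.post_mmu_map_page = tdx_post_mmu_map_page,
};

Note that kvm_post_mmu_map_page() runs even when kvm_mmu_map_page()
fails, so an implementation has to tolerate being called for a page
that was never actually mapped.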