[14/15] KVM: x86: Hoist SVM MSR intercepts to common x86 code

Message ID 20241127201929.4005605-15-aaronlewis@google.com (mailing list archive)
State New
Series Unify MSR intercepts in x86

Commit Message

Aaron Lewis Nov. 27, 2024, 8:19 p.m. UTC
Now that the SVM and VMX implementations for MSR intercepts are the
same, hoist the SVM implementation to common x86 code.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  3 ++
 arch/x86/kvm/svm/svm.c             | 73 ++---------------------------
 arch/x86/kvm/x86.c                 | 75 ++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.h                 |  2 +
 5 files changed, 86 insertions(+), 68 deletions(-)
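
Note (illustration only, not part of the patch): the new
KVM_X86_OP_OPTIONAL(get_msr_bitmap_entries) entry is what lets the common
helpers in x86.c reach the vendor implementation via
static_call(kvm_x86_get_msr_bitmap_entries). Roughly, kvm_host.h declares one
static call per entry in kvm-x86-ops.h, and kvm_ops_update() later points each
one at the function installed in kvm_x86_ops; a simplified sketch of that
existing machinery:

  /* arch/x86/include/asm/kvm_host.h: declare one static call per hook. */
  #define KVM_X86_OP(func) \
	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
  #define KVM_X86_OP_OPTIONAL KVM_X86_OP
  #include <asm/kvm-x86-ops.h>

  /* arch/x86/kvm/x86.c, kvm_ops_update(): patch each static call to the vendor op. */
  #define __KVM_X86_OP(func) \
	static_call_update(kvm_x86_##func, kvm_x86_ops.func);

So setting .get_msr_bitmap_entries = svm_get_msr_bitmap_entries in svm_x86_ops
is all SVM needs for the common helpers to find its bitmap layout.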

Patch

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 124c2e1e42026..3f10ce4957f74 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -132,6 +132,7 @@  KVM_X86_OP(apic_init_signal_blocked)
 KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
 KVM_X86_OP_OPTIONAL(migrate_timers)
 KVM_X86_OP_OPTIONAL(msr_filter_changed)
+KVM_X86_OP_OPTIONAL(get_msr_bitmap_entries)
 KVM_X86_OP(disable_intercept_for_msr)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 808b5365e4bd2..763fc054a2c56 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1830,6 +1830,9 @@  struct kvm_x86_ops {
 
 	const u32 * const possible_passthrough_msrs;
 	const u32 nr_possible_passthrough_msrs;
+	void (*get_msr_bitmap_entries)(struct kvm_vcpu *vcpu, u32 msr,
+				       unsigned long **read_map, u8 *read_bit,
+				       unsigned long **write_map, u8 *write_bit);
 	void (*disable_intercept_for_msr)(struct kvm_vcpu *vcpu, u32 msr, int type);
 	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
 	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 31ed6c68e8194..aaf244e233b90 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -799,84 +799,20 @@  static void svm_get_msr_bitmap_entries(struct kvm_vcpu *vcpu, u32 msr,
 	*write_map = &svm->msrpm[offset];
 }
 
-#define BUILD_SVM_MSR_BITMAP_HELPER(fn, bitop, access)			     \
-static inline void fn(struct kvm_vcpu *vcpu, u32 msr)			     \
-{									     \
-	unsigned long *read_map, *write_map;				     \
-	u8 read_bit, write_bit;						     \
-									     \
-	svm_get_msr_bitmap_entries(vcpu, msr, &read_map, &read_bit,	     \
-				   &write_map, &write_bit);		     \
-	bitop(access##_bit, access##_map);				     \
-}
-
-BUILD_SVM_MSR_BITMAP_HELPER(svm_set_msr_bitmap_read, __set_bit, read)
-BUILD_SVM_MSR_BITMAP_HELPER(svm_set_msr_bitmap_write, __set_bit, write)
-BUILD_SVM_MSR_BITMAP_HELPER(svm_clear_msr_bitmap_read, __clear_bit, read)
-BUILD_SVM_MSR_BITMAP_HELPER(svm_clear_msr_bitmap_write, __clear_bit, write)
-
 void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-	int slot;
-
-	slot = kvm_passthrough_msr_slot(msr);
-	WARN_ON(slot == -ENOENT);
-	if (slot >= 0) {
-		/* Set the shadow bitmaps to the desired intercept states */
-		if (type & MSR_TYPE_R)
-			__clear_bit(slot, vcpu->arch.shadow_msr_intercept.read);
-		if (type & MSR_TYPE_W)
-			__clear_bit(slot, vcpu->arch.shadow_msr_intercept.write);
-	}
-
-	/*
-	 * Don't disabled interception for the MSR if userspace wants to
-	 * handle it.
-	 */
-	if ((type & MSR_TYPE_R) && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
-		svm_set_msr_bitmap_read(vcpu, msr);
-		type &= ~MSR_TYPE_R;
-	}
-
-	if ((type & MSR_TYPE_W) && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
-		svm_set_msr_bitmap_write(vcpu, msr);
-		type &= ~MSR_TYPE_W;
-	}
-
-	if (type & MSR_TYPE_R)
-		svm_clear_msr_bitmap_read(vcpu, msr);
-
-	if (type & MSR_TYPE_W)
-		svm_clear_msr_bitmap_write(vcpu, msr);
+	kvm_disable_intercept_for_msr(vcpu, msr, type);
 
 	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
-	svm->nested.force_msr_bitmap_recalc = true;
+	to_svm(vcpu)->nested.force_msr_bitmap_recalc = true;
 }
 
 void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-	int slot;
-
-	slot = kvm_passthrough_msr_slot(msr);
-	WARN_ON(slot == -ENOENT);
-	if (slot >= 0) {
-		/* Set the shadow bitmaps to the desired intercept states */
-		if (type & MSR_TYPE_R)
-			__set_bit(slot, vcpu->arch.shadow_msr_intercept.read);
-		if (type & MSR_TYPE_W)
-			__set_bit(slot, vcpu->arch.shadow_msr_intercept.write);
-	}
-
-	if (type & MSR_TYPE_R)
-		svm_set_msr_bitmap_read(vcpu, msr);
-
-	if (type & MSR_TYPE_W)
-		svm_set_msr_bitmap_write(vcpu, msr);
+	kvm_enable_intercept_for_msr(vcpu, msr, type);
 
 	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
-	svm->nested.force_msr_bitmap_recalc = true;
+	to_svm(vcpu)->nested.force_msr_bitmap_recalc = true;
 }
 
 unsigned long *svm_vcpu_alloc_msrpm(void)
@@ -5127,6 +5063,7 @@  static struct kvm_x86_ops svm_x86_ops __initdata = {
 
 	.possible_passthrough_msrs = direct_access_msrs,
 	.nr_possible_passthrough_msrs = ARRAY_SIZE(direct_access_msrs),
+	.get_msr_bitmap_entries = svm_get_msr_bitmap_entries,
 	.disable_intercept_for_msr = svm_disable_intercept_for_msr,
 	.complete_emulated_msr = svm_complete_emulated_msr,
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2082ae8dc5db1..1e607a0eb58a0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1819,6 +1819,81 @@  int kvm_passthrough_msr_slot(u32 msr)
 }
 EXPORT_SYMBOL_GPL(kvm_passthrough_msr_slot);
 
+#define BUILD_KVM_MSR_BITMAP_HELPER(fn, bitop, access)			     \
+static inline void fn(struct kvm_vcpu *vcpu, u32 msr)			     \
+{									     \
+	unsigned long *read_map, *write_map;				     \
+	u8 read_bit, write_bit;						     \
+									     \
+	static_call(kvm_x86_get_msr_bitmap_entries)(vcpu, msr,		     \
+						    &read_map, &read_bit,    \
+						    &write_map, &write_bit); \
+	bitop(access##_bit, access##_map);				     \
+}
+
+BUILD_KVM_MSR_BITMAP_HELPER(kvm_set_msr_bitmap_read, __set_bit, read)
+BUILD_KVM_MSR_BITMAP_HELPER(kvm_set_msr_bitmap_write, __set_bit, write)
+BUILD_KVM_MSR_BITMAP_HELPER(kvm_clear_msr_bitmap_read, __clear_bit, read)
+BUILD_KVM_MSR_BITMAP_HELPER(kvm_clear_msr_bitmap_write, __clear_bit, write)
+
+void kvm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+{
+	int slot;
+
+	slot = kvm_passthrough_msr_slot(msr);
+	WARN_ON(slot == -ENOENT);
+	if (slot >= 0) {
+		/* Set the shadow bitmaps to the desired intercept states */
+		if (type & MSR_TYPE_R)
+			__clear_bit(slot, vcpu->arch.shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			__clear_bit(slot, vcpu->arch.shadow_msr_intercept.write);
+	}
+
+	/*
+	 * Don't disable interception for the MSR if userspace wants to
+	 * handle it.
+	 */
+	if ((type & MSR_TYPE_R) && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
+		kvm_set_msr_bitmap_read(vcpu, msr);
+		type &= ~MSR_TYPE_R;
+	}
+
+	if ((type & MSR_TYPE_W) && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
+		kvm_set_msr_bitmap_write(vcpu, msr);
+		type &= ~MSR_TYPE_W;
+	}
+
+	if (type & MSR_TYPE_R)
+		kvm_clear_msr_bitmap_read(vcpu, msr);
+
+	if (type & MSR_TYPE_W)
+		kvm_clear_msr_bitmap_write(vcpu, msr);
+}
+EXPORT_SYMBOL_GPL(kvm_disable_intercept_for_msr);
+
+void kvm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+{
+	int slot;
+
+	slot = kvm_passthrough_msr_slot(msr);
+	WARN_ON(slot == -ENOENT);
+	if (slot >= 0) {
+		/* Set the shadow bitmaps to the desired intercept states */
+		if (type & MSR_TYPE_R)
+			__set_bit(slot, vcpu->arch.shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			__set_bit(slot, vcpu->arch.shadow_msr_intercept.write);
+	}
+
+	if (type & MSR_TYPE_R)
+		kvm_set_msr_bitmap_read(vcpu, msr);
+
+	if (type & MSR_TYPE_W)
+		kvm_set_msr_bitmap_write(vcpu, msr);
+}
+EXPORT_SYMBOL_GPL(kvm_enable_intercept_for_msr);
+
 static void kvm_msr_filter_changed(struct kvm_vcpu *vcpu)
 {
 	u32 msr, i;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 208f0698c64e2..239cc4de49c58 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -556,6 +556,8 @@  int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
 int kvm_passthrough_msr_slot(u32 msr);
+void kvm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+void kvm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 
 enum kvm_msr_access {
 	MSR_TYPE_R	= BIT(0),
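
For reference (illustration only, derived from the BUILD_KVM_MSR_BITMAP_HELPER()
macro added above): the "access" argument is token-pasted into both the bit and
the map operands, so e.g. kvm_clear_msr_bitmap_read() expands to roughly:

  static inline void kvm_clear_msr_bitmap_read(struct kvm_vcpu *vcpu, u32 msr)
  {
	unsigned long *read_map, *write_map;
	u8 read_bit, write_bit;

	/* Ask the vendor module which bitmap word and bit control this MSR. */
	static_call(kvm_x86_get_msr_bitmap_entries)(vcpu, msr, &read_map, &read_bit,
						    &write_map, &write_bit);
	/* "read" was pasted into both operands: clear the read-intercept bit. */
	__clear_bit(read_bit, read_map);
  }

i.e. the vendor hook is expected to do the bitmap address math (SVM msrpm vs.
VMX msr_bitmap), and the common code only flips the bit it is handed.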