
[RFC,v7,17/78] KVM: svm: pass struct kvm_vcpu to set_msr_interception()

Message ID 20200207181636.1065-18-alazar@bitdefender.com (mailing list archive)
State New, archived
Series VM introspection

Commit Message

Adalbert Lazăr Feb. 7, 2020, 6:15 p.m. UTC
From: Nicușor Cîțu <ncitu@bitdefender.com>

This is needed in order to handle introspection clients controlling the
MSR-related VM-exits.

Signed-off-by: Nicușor Cîțu <ncitu@bitdefender.com>
Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
 arch/x86/kvm/svm.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

Patch

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0021d8c2feca..174ced633b60 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1077,7 +1077,8 @@  static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
 	return !!test_bit(bit_write,  &tmp);
 }
 
-static void set_msr_interception(u32 *msrpm, unsigned msr,
+static void set_msr_interception(struct kvm_vcpu *vcpu,
+				 u32 *msrpm, unsigned msr,
 				 int type, bool value)
 {
 	u8 bit_read, bit_write;
@@ -1116,7 +1117,7 @@  static void svm_vcpu_init_msrpm(u32 *msrpm)
 		if (!direct_access_msrs[i].always)
 			continue;
 
-		set_msr_interception(msrpm, direct_access_msrs[i].index,
+		set_msr_interception(NULL, msrpm, direct_access_msrs[i].index,
 				     MSR_TYPE_RW, 1);
 	}
 }
@@ -1169,13 +1170,13 @@  static void svm_enable_lbrv(struct vcpu_svm *svm)
 	u32 *msrpm = svm->msrpm;
 
 	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
-	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP,
+	set_msr_interception(&svm->vcpu, msrpm, MSR_IA32_LASTBRANCHFROMIP,
 			     MSR_TYPE_RW, 1);
-	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP,
+	set_msr_interception(&svm->vcpu, msrpm, MSR_IA32_LASTBRANCHTOIP,
 			     MSR_TYPE_RW, 1);
-	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP,
+	set_msr_interception(&svm->vcpu, msrpm, MSR_IA32_LASTINTFROMIP,
 			     MSR_TYPE_RW, 1);
-	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP,
+	set_msr_interception(&svm->vcpu, msrpm, MSR_IA32_LASTINTTOIP,
 			     MSR_TYPE_RW, 1);
 }
 
@@ -1184,13 +1185,13 @@  static void svm_disable_lbrv(struct vcpu_svm *svm)
 	u32 *msrpm = svm->msrpm;
 
 	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
-	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP,
+	set_msr_interception(&svm->vcpu, msrpm, MSR_IA32_LASTBRANCHFROMIP,
 			     MSR_TYPE_RW, 0);
-	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP,
+	set_msr_interception(&svm->vcpu, msrpm, MSR_IA32_LASTBRANCHTOIP,
 			     MSR_TYPE_RW, 0);
-	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP,
+	set_msr_interception(&svm->vcpu, msrpm, MSR_IA32_LASTINTFROMIP,
 			     MSR_TYPE_RW, 0);
-	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP,
+	set_msr_interception(&svm->vcpu, msrpm, MSR_IA32_LASTINTTOIP,
 			     MSR_TYPE_RW, 0);
 }
 
@@ -4327,7 +4328,7 @@  static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		 * We update the L1 MSR bit as well since it will end up
 		 * touching the MSR anyway now.
 		 */
-		set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL,
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL,
 				     MSR_TYPE_RW, 1);
 		break;
 	case MSR_IA32_PRED_CMD:
@@ -4344,7 +4345,9 @@  static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 		if (is_guest_mode(vcpu))
 			break;
-		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD,
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD,
+				     MSR_TYPE_R, 0);
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD,
 				     MSR_TYPE_W, 1);
 		break;
 	case MSR_AMD64_VIRT_SPEC_CTRL:
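
For readers following the series, here is a minimal sketch of where the new
argument is headed: with the vcpu available, set_msr_interception() can take
per-vCPU introspection state into account before lifting an MSR intercept.
The body below only paraphrases the existing MSR permission-map logic in
svm.c, and kvmi_monitored_msr() is a hypothetical placeholder for the hook
added later in the series, not an interface introduced by this patch. The
NULL check mirrors the vcpu-less call made from svm_vcpu_init_msrpm().

/*
 * Illustrative sketch only (not part of this patch): a possible consumer
 * of the new vcpu argument.  kvmi_monitored_msr() is hypothetical.
 */
static void set_msr_interception(struct kvm_vcpu *vcpu,
				 u32 *msrpm, unsigned msr,
				 int type, bool value)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset = svm_msrpm_offset(msr);

	BUG_ON(offset == MSR_INVALID);

	/*
	 * Hypothetical: keep the intercept in place if an introspection
	 * client monitors this MSR on this vCPU.  svm_vcpu_init_msrpm()
	 * passes vcpu == NULL, so the check must tolerate that.
	 */
	if (value && vcpu && kvmi_monitored_msr(vcpu, msr))
		value = false;

	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	/* A set bit in the MSR permission map means "intercept". */
	if (type & MSR_TYPE_R)
		value ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	if (type & MSR_TYPE_W)
		value ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

Passing NULL from svm_vcpu_init_msrpm(), as the hunk above does, keeps the
initial bitmap setup unchanged while letting the per-vCPU call sites opt
into any future vcpu-based check.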