--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1048,6 +1048,8 @@ struct kvm_x86_ops {
void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
void (*setup_mce)(struct kvm_vcpu *vcpu);
+	void (*msr_intercept)(struct kvm_vcpu *vcpu, unsigned int msr,
+			      bool enable);
};
struct kvm_arch_async_pf {
@@ -1429,4 +1431,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
#endif
}
+void kvm_arch_msr_intercept(struct kvm_vcpu *vcpu, unsigned int msr,
+			    bool enable);
#endif /* _ASM_X86_KVM_HOST_H */
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5262,6 +5262,15 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
vcpu->arch.mcg_cap &= 0x1ff;
}
+static void svm_msr_intercept(struct kvm_vcpu *vcpu, unsigned int msr,
+			      bool enable)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	/* set_msr_interception() takes "allow direct access" flags. */
+	set_msr_interception(svm->msrpm, msr, !enable, !enable);
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -5374,6 +5383,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.deliver_posted_interrupt = svm_deliver_avic_intr,
.update_pi_irte = svm_update_pi_irte,
.setup_mce = svm_setup_mce,
+
+ .msr_intercept = svm_msr_intercept,
};
static int __init svm_init(void)
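
As context for the SVM wrapper above: set_msr_interception()'s read/write arguments mean "allow direct (non-intercepted) access", which is why svm_msr_intercept() passes !enable. The underlying MSR permission map uses two bits per MSR (even bit traps reads, odd bit traps writes) across three MSR ranges, per the AMD APM. A minimal sketch of that addressing, with an illustrative helper name that is not part of the patch:

/*
 * Sketch only: bit index of an MSR's intercept bit inside the 8 KiB
 * SVM MSR permission map (AMD APM vol. 2, "MSR Intercepts").
 */
static int msrpm_bit_sketch(u32 msr, bool write)
{
	u32 byte_base;

	if (msr <= 0x1fff)
		byte_base = 0x0000;	/* 0x00000000 - 0x00001fff */
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		byte_base = 0x0800;	/* 0xc0000000 - 0xc0001fff */
	else if (msr >= 0xc0010000 && msr <= 0xc0011fff)
		byte_base = 0x1000;	/* 0xc0010000 - 0xc0011fff */
	else
		return -1;	/* no permission bit: always intercepted */

	/* Two bits per MSR: even bit = read intercept, odd bit = write. */
	return byte_base * 8 + 2 * (msr & 0x1fff) + (write ? 1 : 0);
}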
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11457,6 +11457,57 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
~FEATURE_CONTROL_LMCE;
}
+static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
+{
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return;
+
+ /*
+ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+ * have the write-low and read-high bitmap offsets the wrong way round.
+ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+ */
+ if (msr <= 0x1fff) {
+ if (type & MSR_TYPE_R)
+ /* read-low */
+ __set_bit(msr, msr_bitmap + 0x000 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-low */
+ __set_bit(msr, msr_bitmap + 0x800 / f);
+
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ if (type & MSR_TYPE_R)
+ /* read-high */
+ __set_bit(msr, msr_bitmap + 0x400 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-high */
+ __set_bit(msr, msr_bitmap + 0xc00 / f);
+
+ }
+}
+
+static void vmx_msr_intercept(struct kvm_vcpu *vcpu, unsigned int msr,
+			      bool enable)
+{
+	if (enable) {
+		__vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy, msr,
+					       MSR_TYPE_W);
+		__vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode, msr,
+					       MSR_TYPE_W);
+	} else {
+		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr,
+						MSR_TYPE_W);
+		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr,
+						MSR_TYPE_W);
+	}
+}
+
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -11584,6 +11635,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
#endif
.setup_mce = vmx_setup_mce,
+
+ .msr_intercept = vmx_msr_intercept,
};
static int __init vmx_init(void)
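
On the VMX side, the offsets in __vmx_enable_intercept_for_msr() come from the 4 KiB MSR bitmap layout in the Intel SDM: read-low at 0x000, read-high at 0x400, write-low at 0x800 and write-high at 0xc00, one bit per MSR, with the "high" regions covering 0xc0000000-0xc0001fff. A worked example with an illustrative helper that is not part of the patch:

/*
 * Sketch only: absolute bit index of the write-intercept bit in the
 * VMX MSR bitmap.  E.g. MSR_EFER (0xc0000080) -> bit 0x80 of the
 * write-high region, i.e. byte 0xc10, bit 0.
 */
static int vmx_msr_bitmap_write_bit(u32 msr)
{
	if (msr <= 0x1fff)
		return 0x800 * 8 + msr;			/* write-low */
	if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		return 0xc00 * 8 + (msr & 0x1fff);	/* write-high */
	return -1;	/* outside the bitmap: always intercepted */
}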
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8734,6 +8734,13 @@ bool kvm_vector_hashing_enabled(void)
}
EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);
+void kvm_arch_msr_intercept(struct kvm_vcpu *vcpu, unsigned int msr,
+			    bool enable)
+{
+	kvm_x86_ops->msr_intercept(vcpu, msr, enable);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_msr_intercept);
+
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
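
With the hook wired up as above, an arch-independent consumer toggles interception through kvm_arch_msr_intercept(). A hypothetical example (the caller and the choice of MSR_IA32_SYSENTER_CS are illustrative only; on SVM the MSR must be one the permission map already covers):

/* Sketch: trap guest writes to SYSENTER_CS while "watch" is true. */
static void example_watch_sysenter_cs(struct kvm_vcpu *vcpu, bool watch)
{
	kvm_arch_msr_intercept(vcpu, MSR_IA32_SYSENTER_CS, watch);
}

Note the asymmetry this patch leaves in place: the VMX implementation only toggles write interception, while the SVM one toggles both reads and writes, so callers should not rely on read exits.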