@@ -1106,6 +1106,7 @@ struct kvm_x86_ops {
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
bool (*desc_ctrl_supported)(void);
+ void (*control_desc_intercept)(struct kvm_vcpu *vcpu, bool enable);
u64 (*get_dr6)(struct kvm_vcpu *vcpu);
void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
@@ -7447,6 +7447,31 @@ static void svm_control_cr3_intercept(struct kvm_vcpu *vcpu, int type,
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
}
+static void svm_control_desc_intercept(struct kvm_vcpu *vcpu, bool enable)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (enable) {
+ set_intercept(svm, INTERCEPT_STORE_IDTR);
+ set_intercept(svm, INTERCEPT_STORE_GDTR);
+ set_intercept(svm, INTERCEPT_STORE_LDTR);
+ set_intercept(svm, INTERCEPT_STORE_TR);
+ set_intercept(svm, INTERCEPT_LOAD_IDTR);
+ set_intercept(svm, INTERCEPT_LOAD_GDTR);
+ set_intercept(svm, INTERCEPT_LOAD_LDTR);
+ set_intercept(svm, INTERCEPT_LOAD_TR);
+ } else {
+ clr_intercept(svm, INTERCEPT_STORE_IDTR);
+ clr_intercept(svm, INTERCEPT_STORE_GDTR);
+ clr_intercept(svm, INTERCEPT_STORE_LDTR);
+ clr_intercept(svm, INTERCEPT_STORE_TR);
+ clr_intercept(svm, INTERCEPT_LOAD_IDTR);
+ clr_intercept(svm, INTERCEPT_LOAD_GDTR);
+ clr_intercept(svm, INTERCEPT_LOAD_LDTR);
+ clr_intercept(svm, INTERCEPT_LOAD_TR);
+ }
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -7496,6 +7521,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
.desc_ctrl_supported = svm_desc_ctrl_supported,
+ .control_desc_intercept = svm_control_desc_intercept,
.get_dr6 = svm_get_dr6,
.set_dr6 = svm_set_dr6,
.set_dr7 = svm_set_dr7,
@@ -3034,6 +3034,16 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
return eptp;
}
+static void vmx_control_desc_intercept(struct kvm_vcpu *vcpu, bool enable)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (enable)
+ secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
+ else
+ secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
+}
+
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
struct kvm *kvm = vcpu->kvm;
@@ -3090,11 +3100,11 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
if (cr4 & X86_CR4_UMIP) {
- secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
+ vmx_control_desc_intercept(vcpu, true);
hw_cr4 &= ~X86_CR4_UMIP;
} else if (!is_guest_mode(vcpu) ||
!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
- secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
+ vmx_control_desc_intercept(vcpu, false);
}
}
@@ -7920,6 +7930,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
.desc_ctrl_supported = vmx_desc_ctrl_supported,
+ .control_desc_intercept = vmx_control_desc_intercept,
.get_dr6 = vmx_get_dr6,
.set_dr6 = vmx_set_dr6,
.set_dr7 = vmx_set_dr7,
This patch adds the control_desc_intercept() callback (with SVM and VMX implementations) needed for the KVMI_EVENT_DESCRIPTOR event. Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com> --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm.c | 26 ++++++++++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 15 +++++++++++++-- 3 files changed, 40 insertions(+), 2 deletions(-)