@@ -1131,6 +1131,7 @@ struct kvm_x86_ops {
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
bool (*desc_ctrl_supported)(void);
+ void (*control_desc_intercept)(struct kvm_vcpu *vcpu, bool enable); /* toggle interception of descriptor-table register accesses */
void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
@@ -1635,6 +1635,31 @@ static bool svm_desc_ctrl_supported(void)
return true;
}
+static void svm_control_desc_intercept(struct kvm_vcpu *vcpu, bool enable) /* toggle VMCB intercepts for the four descriptor-table registers */
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (enable) { /* trap guest loads and stores of IDTR, GDTR, LDTR and TR */
+ svm_set_intercept(svm, INTERCEPT_STORE_IDTR);
+ svm_set_intercept(svm, INTERCEPT_STORE_GDTR);
+ svm_set_intercept(svm, INTERCEPT_STORE_LDTR);
+ svm_set_intercept(svm, INTERCEPT_STORE_TR);
+ svm_set_intercept(svm, INTERCEPT_LOAD_IDTR);
+ svm_set_intercept(svm, INTERCEPT_LOAD_GDTR);
+ svm_set_intercept(svm, INTERCEPT_LOAD_LDTR);
+ svm_set_intercept(svm, INTERCEPT_LOAD_TR);
+ } else { /* clear all eight intercepts so these accesses no longer exit */
+ svm_clr_intercept(svm, INTERCEPT_STORE_IDTR);
+ svm_clr_intercept(svm, INTERCEPT_STORE_GDTR);
+ svm_clr_intercept(svm, INTERCEPT_STORE_LDTR);
+ svm_clr_intercept(svm, INTERCEPT_STORE_TR);
+ svm_clr_intercept(svm, INTERCEPT_LOAD_IDTR);
+ svm_clr_intercept(svm, INTERCEPT_LOAD_GDTR);
+ svm_clr_intercept(svm, INTERCEPT_LOAD_LDTR);
+ svm_clr_intercept(svm, INTERCEPT_LOAD_TR);
+ }
+}
+
static void update_cr0_intercept(struct vcpu_svm *svm)
{
ulong gcr0 = svm->vcpu.arch.cr0;
@@ -4281,6 +4306,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
.desc_ctrl_supported = svm_desc_ctrl_supported,
+ .control_desc_intercept = svm_control_desc_intercept, /* SVM: toggles LOAD/STORE intercepts for desc-table regs */
.set_dr7 = svm_set_dr7,
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
.cache_reg = svm_cache_reg,
@@ -3120,6 +3120,16 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
vmcs_writel(GUEST_CR3, guest_cr3);
}
+static void vmx_control_desc_intercept(struct kvm_vcpu *vcpu, bool enable) /* toggle descriptor-table exiting via SECONDARY_EXEC_DESC */
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (enable)
+ secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC); /* desc-table accesses now cause VM exits */
+ else
+ secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
+}
+
static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
/*
@@ -3157,11 +3167,11 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
if (cr4 & X86_CR4_UMIP) {
- secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
+ vmx_control_desc_intercept(vcpu, true); /* emulate UMIP by intercepting desc-table insns; hw CR4.UMIP stays clear below */
hw_cr4 &= ~X86_CR4_UMIP;
} else if (!is_guest_mode(vcpu) ||
!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
- secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
+ vmx_control_desc_intercept(vcpu, false); /* UMIP off and L1 doesn't request SECONDARY_EXEC_DESC: stop intercepting */
}
}
@@ -7657,6 +7667,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
.desc_ctrl_supported = vmx_desc_ctrl_supported,
+ .control_desc_intercept = vmx_control_desc_intercept, /* VMX: toggles SECONDARY_EXEC_DESC */
.set_dr7 = vmx_set_dr7,
.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
.cache_reg = vmx_cache_reg,
This function is needed to intercept descriptor-table register accesses. Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com> --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm/svm.c | 26 ++++++++++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 15 +++++++++++++-- 3 files changed, 40 insertions(+), 2 deletions(-)