@@ -122,6 +122,7 @@ KVM_X86_OP_NULL(migrate_timers)
KVM_X86_OP(msr_filter_changed)
KVM_X86_OP_NULL(complete_emulated_msr)
KVM_X86_OP(bp_intercepted)
+KVM_X86_OP(control_cr3_intercept)
#undef KVM_X86_OP
#undef KVM_X86_OP_NULL
@@ -146,6 +146,10 @@
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8
+#define CR_TYPE_R 1
+#define CR_TYPE_W 2
+#define CR_TYPE_RW 3
+
#define ASYNC_PF_PER_VCPU 64
enum kvm_reg {
@@ -1337,6 +1341,8 @@ struct kvm_x86_ops {
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+ void (*control_cr3_intercept)(struct kvm_vcpu *vcpu, int type,
+ bool enable);
int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
@@ -1850,6 +1850,24 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	kvm_update_cpuid_runtime(vcpu);
 }
 
+/*
+ * Enable or disable interception of guest CR3 accesses.  @type is a mask
+ * of CR_TYPE_R and/or CR_TYPE_W; the selected intercept(s) are set when
+ * @enable is true and cleared otherwise.
+ */
+static void svm_control_cr3_intercept(struct kvm_vcpu *vcpu, int type,
+				      bool enable)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (type & CR_TYPE_R) {
+		if (enable)
+			svm_set_intercept(svm, INTERCEPT_CR3_READ);
+		else
+			svm_clr_intercept(svm, INTERCEPT_CR3_READ);
+	}
+	if (type & CR_TYPE_W) {
+		if (enable)
+			svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
+		else
+			svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
+	}
+}
+
 static void svm_set_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg)
 {
@@ -4620,6 +4633,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.set_cr0 = svm_set_cr0,
.is_valid_cr4 = svm_is_valid_cr4,
.set_cr4 = svm_set_cr4,
+ .control_cr3_intercept = svm_control_cr3_intercept,
.set_efer = svm_set_efer,
.get_idt = svm_get_idt,
.set_idt = svm_set_idt,
@@ -3004,6 +3004,33 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
 			  CPU_BASED_CR3_STORE_EXITING)
 
+/*
+ * Enable or disable VM exits for guest CR3 accesses.  @type is a mask of
+ * CR_TYPE_R and/or CR_TYPE_W; the selected exiting control(s) are set when
+ * @enable is true and cleared otherwise.
+ */
+static void vmx_control_cr3_intercept(struct kvm_vcpu *vcpu, int type,
+				      bool enable)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 cr3_exec_control = 0;
+
+	/* A guest CR3 read exits via CR3-store, a CR3 write via CR3-load. */
+	if (type & CR_TYPE_R)
+		cr3_exec_control |= CPU_BASED_CR3_STORE_EXITING;
+	if (type & CR_TYPE_W)
+		cr3_exec_control |= CPU_BASED_CR3_LOAD_EXITING;
+
+	/* Avoid a pointless VMCS read-modify-write with a zero mask. */
+	if (!cr3_exec_control)
+		return;
+
+	if (enable)
+		exec_controls_setbit(vmx, cr3_exec_control);
+	else
+		exec_controls_clearbit(vmx, cr3_exec_control);
+}
+
 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7604,6 +7621,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.set_cr0 = vmx_set_cr0,
.is_valid_cr4 = vmx_is_valid_cr4,
.set_cr4 = vmx_set_cr4,
+ .control_cr3_intercept = vmx_control_cr3_intercept,
.set_efer = vmx_set_efer,
.get_idt = vmx_get_idt,
.set_idt = vmx_set_idt,