@@ -2990,6 +2990,10 @@ static int wrmsr_interception(struct vcpu_svm *svm)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
if (svm_set_msr(&svm->vcpu, ecx, data)) {
trace_kvm_msr_write_ex(ecx, data);
+ if (svm->vcpu.run->exit_reason == KVM_EXIT_X86_MSR_OP) { /* set by kvm_set_msr_common: userspace completes this MSR op */
+ skip_emulated_instruction(&svm->vcpu); /* advance past WRMSR now; userspace won't re-enter this handler */
+ return 0; /* NOTE(review): presumably 0 = exit to userspace here — confirm handler return convention */
+ }
kvm_inject_gp(&svm->vcpu, 0);
} else {
trace_kvm_msr_write(ecx, data);
@@ -3385,6 +3385,10 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
if (vmx_set_msr(vcpu, ecx, data) != 0) {
trace_kvm_msr_write_ex(ecx, data);
+ if (vcpu->run->exit_reason == KVM_EXIT_X86_MSR_OP) { /* set by kvm_set_msr_common: userspace completes this MSR op */
+ skip_emulated_instruction(vcpu); /* advance past WRMSR now; userspace won't re-enter this handler */
+ return 0; /* NOTE(review): presumably 0 = exit to userspace here — confirm handler return convention */
+ }
kvm_inject_gp(vcpu, 0);
return 1;
}
@@ -1543,6 +1543,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
kvm_read_guest(vcpu->kvm, data, &area_desc, sizeof(area_desc));
area_desc.result = 0xF;
+
+ if (vcpu->kvm->register_mem_area_uspace) {
+ vcpu->run->exit_reason = KVM_EXIT_X86_MSR_OP;
+ vcpu->run->msr.msr_data = data; /* gpa of the area descriptor, for userspace */
+ return 1; /* non-zero return lets the vendor handler see the deferred exit */
+ }
+ /* no userspace handler registered: complete the op in-kernel */
kvm_write_guest(vcpu->kvm, data, &area_desc, sizeof(area_desc));
break;
}
@@ -1974,6 +1981,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_X86_ROBUST_SINGLESTEP:
case KVM_CAP_XSAVE:
case KVM_CAP_ASYNC_PF:
+ case KVM_CAP_REGISTER_MEM_AREA: /* advertise userspace mem-area registration support */
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -3555,6 +3563,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_USERSPACE_REGISTER_MEM_AREA:
+ kvm->register_mem_area_uspace = 1;
+ r = 0; /* report success, matching the surrounding cases' r-before-break convention */
+ break;
default:
;