@@ -1379,6 +1379,41 @@ static int tdx_emulate_mmio(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int tdx_emulate_rdmsr(struct kvm_vcpu *vcpu)
+{
+	u32 index = tdvmcall_a0_read(vcpu);
+	u64 data;
+
+	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ) ||
+	    kvm_get_msr(vcpu, index, &data)) {
+		trace_kvm_msr_read_ex(index);
+		tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_INVALID_OPERAND);
+		return 1;
+	}
+	trace_kvm_msr_read(index, data);
+
+	tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_SUCCESS);
+	tdvmcall_set_return_val(vcpu, data);
+	return 1;
+}
+
+static int tdx_emulate_wrmsr(struct kvm_vcpu *vcpu)
+{
+	u32 index = tdvmcall_a0_read(vcpu);
+	u64 data = tdvmcall_a1_read(vcpu);
+
+	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE) ||
+	    kvm_set_msr(vcpu, index, data)) {
+		trace_kvm_msr_write_ex(index, data);
+		tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_INVALID_OPERAND);
+		return 1;
+	}
+
+	trace_kvm_msr_write(index, data);
+	tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_SUCCESS);
+	return 1;
+}
+
 static int handle_tdvmcall(struct kvm_vcpu *vcpu)
 {
 	if (tdvmcall_exit_type(vcpu))
@@ -1393,6 +1428,10 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
 		return tdx_emulate_io(vcpu);
 	case EXIT_REASON_EPT_VIOLATION:
 		return tdx_emulate_mmio(vcpu);
+	case EXIT_REASON_MSR_READ:
+		return tdx_emulate_rdmsr(vcpu);
+	case EXIT_REASON_MSR_WRITE:
+		return tdx_emulate_wrmsr(vcpu);
 	default:
 		break;
 	}
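
For reference, the tdvmcall_*() helpers used above follow the GHCI calling convention for TDG.VP.VMCALL: R12-R15 carry the call arguments a0-a3, R10 carries the status code returned to the guest, and R11 the returned value. The sketch below shows one plausible shape for those accessors built on KVM's standard GPR cache helpers; the macro and its exact form are assumptions for illustration, not lines from this patch.

/*
 * Illustrative sketch (not part of the patch): assumed TDVMCALL register
 * accessors, following the GHCI layout (R12-R15 = a0-a3, R10 = status,
 * R11 = returned value).  kvm_rNN_read()/kvm_rNN_write() are KVM's GPR
 * cache accessors from arch/x86/kvm/kvm_cache_regs.h.
 */
#define BUILD_TDVMCALL_ACCESSORS(param, gpr)				\
static __always_inline							\
unsigned long tdvmcall_##param##_read(struct kvm_vcpu *vcpu)		\
{									\
	return kvm_##gpr##_read(vcpu);					\
}

BUILD_TDVMCALL_ACCESSORS(a0, r12);	/* a0: MSR index */
BUILD_TDVMCALL_ACCESSORS(a1, r13);	/* a1: data to write for WRMSR */

static __always_inline void tdvmcall_set_return_code(struct kvm_vcpu *vcpu,
						     unsigned long val)
{
	kvm_r10_write(vcpu, val);	/* R10: TDVMCALL status code */
}

static __always_inline void tdvmcall_set_return_val(struct kvm_vcpu *vcpu,
						    unsigned long val)
{
	kvm_r11_write(vcpu, val);	/* R11: value handed back to the TD */
}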
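
Note that the kvm_msr_allowed() checks make userspace MSR filtering apply to TDVMCALL-based MSR accesses as well: with the code above, a filtered MSR surfaces to the TD as TDG_VP_VMCALL_INVALID_OPERAND. A minimal, hypothetical VMM-side example of installing such a filter with the KVM_X86_SET_MSR_FILTER ioctl is sketched below; the helper name deny_one_msr_write() is made up for illustration.

/*
 * Hypothetical VMM-side usage (not part of the patch): deny guest writes
 * to a single MSR via KVM's MSR filter.  With this in place, a
 * TDG.VP.VMCALL<WRMSR> for that MSR fails kvm_msr_allowed() and the TD
 * sees TDG_VP_VMCALL_INVALID_OPERAND.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int deny_one_msr_write(int vm_fd, __u32 msr)
{
	static __u8 bitmap[1];		/* one bit per MSR; 0 = deny, 1 = allow */
	struct kvm_msr_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;

	filter.ranges[0].flags = KVM_MSR_FILTER_WRITE;
	filter.ranges[0].base = msr;
	filter.ranges[0].nmsrs = 1;
	filter.ranges[0].bitmap = bitmap;	/* bit 0 clear: writes denied */

	return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
}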