@@ -1202,6 +1202,71 @@ static int tdx_emulate_hlt(struct kvm_vcpu *vcpu)
return kvm_emulate_halt_noskip(vcpu);
}
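+
+/*
+ * Completion callback for a port write that was forwarded to userspace:
+ * the value has already been consumed, so just report success (and a
+ * zeroed output register) back to the guest.
+ */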
+static int tdx_complete_pio_out(struct kvm_vcpu *vcpu)
+{
+ tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_SUCCESS);
+ tdvmcall_set_return_val(vcpu, 0);
+ return 1;
+}
+
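+/*
+ * Completion callback for a port read that was forwarded to userspace.
+ * Re-invoking pio_in_emulated() while vcpu->arch.pio.count is still
+ * pending pulls the value userspace stored in the PIO buffer, which is
+ * then handed back to the guest via the TDVMCALL output register (R11).
+ */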
+static int tdx_complete_pio_in(struct kvm_vcpu *vcpu)
+{
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+ unsigned long val = 0;
+ int ret;
+
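+	/* The IO hypercall transfers exactly one element per invocation. */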
+ WARN_ON_ONCE(vcpu->arch.pio.count != 1);
+
+ ret = ctxt->ops->pio_in_emulated(ctxt, vcpu->arch.pio.size,
+ vcpu->arch.pio.port, &val, 1);
+ WARN_ON_ONCE(!ret);
+
+ tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_SUCCESS);
+ tdvmcall_set_return_val(vcpu, val);
+
+ return 1;
+}
+
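+/*
+ * Emulate TDG.VP.VMCALL<Instruction.IO>: a0 carries the access size (1,
+ * 2 or 4 bytes), a1 the direction (0 = in, 1 = out), a2 the port number
+ * and, for writes, a3 the value.  The completion status is returned to
+ * the guest in R10 and, for reads, the data in R11.
+ */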
+static int tdx_emulate_io(struct kvm_vcpu *vcpu)
+{
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+ unsigned long val = 0;
+ unsigned int port;
+	u64 size, write;
+	int ret;
+
+ ++vcpu->stat.io_exits;
+
+ size = tdvmcall_a0_read(vcpu);
+ write = tdvmcall_a1_read(vcpu);
+ port = tdvmcall_a2_read(vcpu);
+
+	if ((write != 0 && write != 1) ||
+	    (size != 1 && size != 2 && size != 4)) {
+		tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_INVALID_OPERAND);
+		return 1;
+	}
+
+	if (write) {
+		val = tdvmcall_a3_read(vcpu);
+		ret = ctxt->ops->pio_out_emulated(ctxt, size, port, &val, 1);
+
+		/*
+		 * Nothing is handed back to the guest on an OUT; clear the
+		 * pending PIO state so that a completion callback, if one is
+		 * needed, only has to report status.
+		 */
+		vcpu->arch.pio.count = 0;
+	} else {
+		ret = ctxt->ops->pio_in_emulated(ctxt, size, port, &val, 1);
+	}
+
+	if (ret) {
+		/* R10 already holds TDG_VP_VMCALL_SUCCESS (zero) on this path. */
+		tdvmcall_set_return_val(vcpu, val);
+	} else {
+		if (write)
+			vcpu->arch.complete_userspace_io = tdx_complete_pio_out;
+		else
+			vcpu->arch.complete_userspace_io = tdx_complete_pio_in;
+	}
+
+ return ret;
+}
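+
+/*
+ * For reference, a minimal sketch of how a Linux guest would issue an
+ * OUT through this path, assuming the guest's __tdx_hypercall() wrapper
+ * (struct and flag names vary across kernel versions):
+ *
+ *	struct tdx_hypercall_args args = {
+ *		.r10 = TDX_HYPERCALL_STANDARD,
+ *		.r11 = EXIT_REASON_IO_INSTRUCTION,
+ *		.r12 = size,
+ *		.r13 = 1,	(direction: 1 = out, 0 = in)
+ *		.r14 = port,
+ *		.r15 = value,
+ *	};
+ *	__tdx_hypercall(&args, 0);
+ *
+ * A read passes .r13 = 0, is invoked with TDX_HCALL_HAS_OUTPUT, and
+ * finds the returned data in args.r11.
+ */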
+
static int handle_tdvmcall(struct kvm_vcpu *vcpu)
{
if (tdvmcall_exit_type(vcpu))
@@ -1212,6 +1277,8 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
return tdx_emulate_cpuid(vcpu);
case EXIT_REASON_HLT:
return tdx_emulate_hlt(vcpu);
+ case EXIT_REASON_IO_INSTRUCTION:
+ return tdx_emulate_io(vcpu);
default:
break;
}