@@ -25,5 +25,7 @@ uint64_t tdg_vp_vmcall_instruction_wrmsr(uint64_t index, uint64_t value);
uint64_t tdg_vp_vmcall_instruction_hlt(uint64_t interrupt_blocked_flag);
uint64_t tdg_vp_vmcall_ve_request_mmio_read(uint64_t address, uint64_t size,
uint64_t *data_out);
+uint64_t tdg_vp_vmcall_ve_request_mmio_write(uint64_t address, uint64_t size,
+ uint64_t data_in);

#endif // SELFTEST_TDX_TDX_H
@@ -123,3 +123,17 @@ uint64_t tdg_vp_vmcall_ve_request_mmio_read(uint64_t address, uint64_t size,
return ret;
}
+
+uint64_t tdg_vp_vmcall_ve_request_mmio_write(uint64_t address, uint64_t size,
+ uint64_t data_in)
+{
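+	/*
+	 * TDG.VP.VMCALL<#VE.RequestMMIO> register layout: r12 carries the
+	 * access size in bytes, r13 the direction (MMIO_WRITE here), r14 the
+	 * MMIO address and r15 the data to be written.
+	 */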
+ struct tdx_hypercall_args args = {
+ .r11 = TDG_VP_VMCALL_VE_REQUEST_MMIO,
+ .r12 = size,
+ .r13 = MMIO_WRITE,
+ .r14 = address,
+ .r15 = data_in,
+ };
+
+ return __tdx_hypercall(&args, 0);
+}
@@ -804,6 +804,87 @@ void verify_mmio_reads(void)
printf("\t ... PASSED\n");
}

+void guest_mmio_writes(void)
+{
+ uint64_t mmio_test_addr = TDX_MMIO_TEST_ADDR | tdx_s_bit;
+ uint64_t ret;
+
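+	/*
+	 * The test address is accessed through a shared GPA (tdx_s_bit set),
+	 * since MMIO is emulated by the host. Each write below exits to the
+	 * host, which checks the access size and data.
+	 */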
+ ret = tdg_vp_vmcall_ve_request_mmio_write(mmio_test_addr, 1, 0x12);
+ tdx_assert_error(ret);
+
+ ret = tdg_vp_vmcall_ve_request_mmio_write(mmio_test_addr, 2, 0x1234);
+ tdx_assert_error(ret);
+
+ ret = tdg_vp_vmcall_ve_request_mmio_write(mmio_test_addr, 4, 0x12345678);
+ tdx_assert_error(ret);
+
+ ret = tdg_vp_vmcall_ve_request_mmio_write(mmio_test_addr, 8, 0x1234567890ABCDEF);
+ tdx_assert_error(ret);
+
+ /* Make sure host and guest are synced to the same point of execution */
+ tdx_test_report_to_user_space(MMIO_SYNC_VALUE);
+
+	/*
+	 * Write across page boundary; expected to fail with
+	 * TDG_VP_VMCALL_INVALID_OPERAND.
+	 */
+ ret = tdg_vp_vmcall_ve_request_mmio_write(PAGE_SIZE - 1, 8, 0);
+ tdx_assert_error(ret);
+
+ tdx_test_success();
+}
+
+/*
+ * Verifies guest MMIO writes.
+ */
+void verify_mmio_writes(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ uint64_t byte_8;
+ uint32_t byte_4;
+ uint16_t byte_2;
+ uint8_t byte_1;
+
+ vm = td_create();
+ td_initialize(vm, VM_MEM_SRC_ANONYMOUS, 0);
+ vcpu = td_vcpu_add(vm, 0, guest_mmio_writes);
+ td_finalize(vm);
+
+ printf("Verifying TD MMIO writes:\n");
+
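+	/*
+	 * Each MMIO write in the guest exits to user space as KVM_EXIT_MMIO;
+	 * record the data of each exit and compare it against the values the
+	 * guest wrote.
+	 */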
+ tdx_run(vcpu);
+ tdx_test_assert_mmio(vcpu, TDX_MMIO_TEST_ADDR, 1, MMIO_WRITE);
+ byte_1 = *(uint8_t *)(vcpu->run->mmio.data);
+
+ tdx_run(vcpu);
+ tdx_test_assert_mmio(vcpu, TDX_MMIO_TEST_ADDR, 2, MMIO_WRITE);
+ byte_2 = *(uint16_t *)(vcpu->run->mmio.data);
+
+ tdx_run(vcpu);
+ tdx_test_assert_mmio(vcpu, TDX_MMIO_TEST_ADDR, 4, MMIO_WRITE);
+ byte_4 = *(uint32_t *)(vcpu->run->mmio.data);
+
+ tdx_run(vcpu);
+ tdx_test_assert_mmio(vcpu, TDX_MMIO_TEST_ADDR, 8, MMIO_WRITE);
+ byte_8 = *(uint64_t *)(vcpu->run->mmio.data);
+
+ TEST_ASSERT_EQ(byte_1, 0x12);
+ TEST_ASSERT_EQ(byte_2, 0x1234);
+ TEST_ASSERT_EQ(byte_4, 0x12345678);
+ TEST_ASSERT_EQ(byte_8, 0x1234567890ABCDEF);
+
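+	/* Wait for the guest to reach the MMIO_SYNC_VALUE sync point. */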
+ tdx_run(vcpu);
+ TEST_ASSERT_EQ(tdx_test_read_report_from_guest(vcpu), MMIO_SYNC_VALUE);
+
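+	/*
+	 * The guest's write across the page boundary is expected to fail with
+	 * TDG_VP_VMCALL_INVALID_OPERAND; the guest reports that error back as
+	 * a fatal error, which reaches user space as a system event.
+	 */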
+ td_vcpu_run(vcpu);
+ TEST_ASSERT_EQ(vcpu->run->exit_reason, KVM_EXIT_SYSTEM_EVENT);
+ TEST_ASSERT_EQ(vcpu->run->system_event.data[12], TDG_VP_VMCALL_INVALID_OPERAND);
+
+ tdx_run(vcpu);
+ tdx_test_assert_success(vcpu);
+
+ kvm_vm_free(vm);
+ printf("\t ... PASSED\n");
+}
+
int main(int argc, char **argv)
{
ksft_print_header();
@@ -811,7 +892,7 @@ int main(int argc, char **argv)
if (!is_tdx_enabled())
ksft_exit_skip("TDX is not supported by the KVM. Exiting.\n");

-	ksft_set_plan(11);
+ ksft_set_plan(12);
ksft_test_result(!run_in_new_process(&verify_td_lifecycle),
"verify_td_lifecycle\n");
ksft_test_result(!run_in_new_process(&verify_report_fatal_error),
@@ -834,6 +915,8 @@ int main(int argc, char **argv)
"verify_guest_hlt\n");
ksft_test_result(!run_in_new_process(&verify_mmio_reads),
"verify_mmio_reads\n");
+ ksft_test_result(!run_in_new_process(&verify_mmio_writes),
+ "verify_mmio_writes\n");

	ksft_finished();
return 0;