@@ -72,6 +72,7 @@ struct memslot_desc {
static struct event_cnt {
int aborts;
+ int mmio_exits;
int fail_vcpu_runs;
int uffd_faults;
/* uffd_faults is incremented from multiple threads. */
@@ -89,6 +90,8 @@ struct test_desc {
int (*uffd_test_handler)(int mode, int uffd, struct uffd_msg *msg);
void (*dabt_handler)(struct ex_regs *regs);
void (*iabt_handler)(struct ex_regs *regs);
+ void (*mmio_handler)(struct kvm_run *run);
+ void (*fail_vcpu_run_handler)(int ret);
uint32_t pt_memslot_flags;
uint32_t test_memslot_flags;
bool skip;
@@ -318,6 +321,20 @@ static void guest_code(struct test_desc *test)
GUEST_DONE();
}
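+/*
+ * A stage-1 page-table walk that needs to write (e.g., to set the
+ * Access Flag) faults on the read-only PT memslot and is taken by the
+ * guest as an abort. These handlers ask the host to recreate the PT
+ * memslot as writable so the access can be retried.
+ */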
+static void dabt_s1ptw_on_ro_memslot_handler(struct ex_regs *regs)
+{
+ GUEST_ASSERT_EQ(read_sysreg(far_el1), TEST_GVA);
+ events.aborts += 1;
+ GUEST_SYNC(CMD_RECREATE_PT_MEMSLOT_WR);
+}
+
+static void iabt_s1ptw_on_ro_memslot_handler(struct ex_regs *regs)
+{
+ GUEST_ASSERT_EQ(regs->pc, TEST_EXEC_GVA);
+ events.aborts += 1;
+ GUEST_SYNC(CMD_RECREATE_PT_MEMSLOT_WR);
+}
+
static void no_dabt_handler(struct ex_regs *regs)
{
GUEST_ASSERT_1(false, read_sysreg(far_el1));
@@ -403,6 +420,32 @@ static bool punch_hole_in_memslot(struct kvm_vm *vm,
return true;
}
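+/*
+ * KVM doesn't allow toggling KVM_MEM_READONLY on an existing memslot;
+ * delete the memslot (by setting its size to 0) and then recreate it
+ * with the new flags.
+ */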
+static void recreate_memslot(struct kvm_vm *vm, struct memslot_desc *ms,
+ uint32_t flags)
+{
+ vm_set_user_memory_region(vm, ms->idx, 0, ms->gpa, 0, ms->hva);
+ vm_set_user_memory_region(vm, ms->idx, flags, ms->gpa, ms->size, ms->hva);
+}
+
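+/*
+ * A write with valid syndrome info to the read-only test memslot exits
+ * to userspace as an MMIO write. Emulate it by performing the write on
+ * the host's view of the memory.
+ */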
+static void mmio_on_test_gpa_handler(struct kvm_run *run)
+{
+ ASSERT_EQ(run->mmio.phys_addr, memslot[TEST].gpa);
+
+ memcpy(memslot[TEST].hva, run->mmio.data, run->mmio.len);
+ events.mmio_exits += 1;
+}
+
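+/* Catch-all handler for tests that don't expect any MMIO exit. */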
+static void mmio_no_handler(struct kvm_run *run)
+{
+ uint64_t data;
+
+ memcpy(&data, run->mmio.data, sizeof(data));
+ pr_debug("addr=%llx len=%d w=%d data=%lx\n",
+ run->mmio.phys_addr, run->mmio.len,
+ run->mmio.is_write, data);
+ TEST_FAIL("No MMIO exit was expected.");
+}
+
static bool check_write_in_dirty_log(struct kvm_vm *vm,
struct memslot_desc *ms, uint64_t host_pg_nr)
{
@@ -440,6 +483,8 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG)
TEST_ASSERT(!check_write_in_dirty_log(vm, &memslot[PT], 0),
"Unexpected s1ptw write in dirty log");
+ if (cmd & CMD_RECREATE_PT_MEMSLOT_WR)
+ recreate_memslot(vm, &memslot[PT], 0);
return continue_test;
}
@@ -456,6 +501,13 @@ void fail_vcpu_run_no_handler(int ret)
TEST_FAIL("Unexpected vcpu run failure\n");
}
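+/*
+ * A write with no syndrome information (e.g., CAS) on a read-only
+ * memslot can't be described to userspace, so KVM_RUN fails with
+ * ENOSYS instead of exiting with KVM_EXIT_MMIO.
+ */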
+void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
+{
+ TEST_ASSERT(errno == ENOSYS,
+ "The mmio handler should have returned not implemented.");
+ events.fail_vcpu_runs += 1;
+}
+
extern unsigned char __exec_test;
void noinline __return_0x77(void)
@@ -625,10 +677,21 @@ static void setup_uffd(enum vm_guest_mode mode, struct test_params *p,
test->uffd_test_handler);
}
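+/*
+ * Install defaults for any handler the test left unset; the defaults
+ * fail the test if they ever run, as no MMIO exit or vcpu run failure
+ * is expected in that case.
+ */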
+static void setup_default_handlers(struct test_desc *test)
+{
+ if (!test->mmio_handler)
+ test->mmio_handler = mmio_no_handler;
+
+ if (!test->fail_vcpu_run_handler)
+ test->fail_vcpu_run_handler = fail_vcpu_run_no_handler;
+}
+
static void check_event_counts(struct test_desc *test)
{
ASSERT_EQ(test->expected_events.aborts, events.aborts);
ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
+ ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
+ ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
}
static void free_uffd(struct test_desc *test, struct uffd_desc **uffd)
@@ -661,12 +724,20 @@ static void reset_event_counts(void)
static bool vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
struct test_desc *test)
{
+ struct kvm_run *run;
bool skip_test = false;
struct ucall uc;
- int stage;
+ int stage, ret;
+
+ run = vcpu->run;
for (stage = 0; ; stage++) {
- vcpu_run(vcpu);
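+ /*
+ * Use the non-asserting _vcpu_run() so that an expected KVM_RUN
+ * failure (e.g., ENOSYS for a no-syndrome MMIO write) can be
+ * routed to the test's fail_vcpu_run_handler.
+ */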
+ ret = _vcpu_run(vcpu);
+ if (ret) {
+ test->fail_vcpu_run_handler(ret);
+ pr_debug("Done.\n");
+ goto done;
+ }
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
@@ -684,6 +755,10 @@ static bool vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
case UCALL_DONE:
pr_debug("Done.\n");
goto done;
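+ /*
+ * UCALL_NONE means the exit didn't come from a ucall;
+ * route MMIO exits to the test's handler.
+ */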
+ case UCALL_NONE:
+ if (run->exit_reason == KVM_EXIT_MMIO)
+ test->mmio_handler(run);
+ break;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
@@ -709,6 +784,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
ucall_init(vm, NULL);
reset_event_counts();
+ setup_abort_handlers(vm, vcpu, test);
setup_memslots(vm, mode, p);
/*
@@ -719,7 +795,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
*/
load_exec_code_for_test();
setup_uffd(mode, p, uffd);
- setup_abort_handlers(vm, vcpu, test);
+ setup_default_handlers(test);
vcpu_args_set(vcpu, 1, test);
sync_global_to_guest(vm, memslot);
@@ -810,6 +886,32 @@ static void help(char *name)
.expected_events = { 0 }, \
}
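+/*
+ * Test an access when both the test and PT memslots are read-only:
+ * expect _aborts guest aborts handled by the given handler(s) and,
+ * for writes with a syndrome, _mmio_exits MMIO exits handled by
+ * _mmio_handler.
+ */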
+#define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits, \
+ _iabt_handler, _dabt_handler, _aborts) \
+{ \
+ .name = SCAT2(ro_memslot, _access), \
+ .test_memslot_flags = KVM_MEM_READONLY, \
+ .pt_memslot_flags = KVM_MEM_READONLY, \
+ .guest_prepare = { _PREPARE(_access) }, \
+ .guest_test = _access, \
+ .mmio_handler = _mmio_handler, \
+ .iabt_handler = _iabt_handler, \
+ .dabt_handler = _dabt_handler, \
+ .expected_events = { .mmio_exits = _mmio_exits, \
+ .aborts = _aborts }, \
+}
+
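+/*
+ * Like TEST_RO_MEMSLOT, but for writes with no syndrome information
+ * (e.g., CAS): KVM can't describe the access to userspace, so expect
+ * the vcpu run itself to fail instead of an MMIO exit.
+ */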
+#define TEST_RO_MEMSLOT_NO_SYNDROME(_access) \
+{ \
+ .name = SCAT2(ro_memslot_no_syndrome, _access), \
+ .test_memslot_flags = KVM_MEM_READONLY, \
+ .pt_memslot_flags = KVM_MEM_READONLY, \
+ .guest_test = _access, \
+ .dabt_handler = dabt_s1ptw_on_ro_memslot_handler, \
+ .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
+ .expected_events = { .aborts = 1, .fail_vcpu_runs = 1 }, \
+}
+
static struct test_desc tests[] = {
/* Check that HW is setting the Access Flag (AF) (sanity checks). */
TEST_ACCESS(guest_read64, with_af, CMD_NONE),
@@ -877,6 +979,32 @@ static struct test_desc tests[] = {
TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log),
TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log),
+ /*
+ * Try accesses when both the test and PT memslots are marked read-only
+ * (with KVM_MEM_READONLY). The S1PTW results in a guest abort, whose
+ * handler asks the host to recreate the PT memslot as writable. Note
+ * that guests would typically panic, as there's no way of asking the
+ * VMM to perform the write for the guest (or to make the memslot
+ * writable). The instruction is then retried: writes with a syndrome
+ * result in an MMIO exit, writes with no syndrome (e.g., CAS) result
+ * in a failed vcpu run, and reads/execs with and without syndromes do
+ * not fault. Check that the expected aborts, failed vcpu runs, and
+ * mmio exits actually happen.
+ */
+ TEST_RO_MEMSLOT(guest_read64, 0, 0, 0,
+ dabt_s1ptw_on_ro_memslot_handler, 1),
+ TEST_RO_MEMSLOT(guest_ld_preidx, 0, 0, 0,
+ dabt_s1ptw_on_ro_memslot_handler, 1),
+ TEST_RO_MEMSLOT(guest_at, 0, 0, 0,
+ dabt_s1ptw_on_ro_memslot_handler, 1),
+ TEST_RO_MEMSLOT(guest_exec, 0, 0, iabt_s1ptw_on_ro_memslot_handler,
+ 0, 1),
+ TEST_RO_MEMSLOT(guest_write64, mmio_on_test_gpa_handler, 1, 0,
+ dabt_s1ptw_on_ro_memslot_handler, 1),
+ TEST_RO_MEMSLOT_NO_SYNDROME(guest_dc_zva),
+ TEST_RO_MEMSLOT_NO_SYNDROME(guest_cas),
+ TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
+
{ 0 }
};
Add some readonly memslot tests into page_fault_test. Mark the data and/or page-table memslots as readonly, perform some accesses, and check that the right fault is triggered when expected (e.g., a store with no write-back should lead to an mmio exit). Signed-off-by: Ricardo Koller <ricarkol@google.com> --- .../selftests/kvm/aarch64/page_fault_test.c | 134 +++++++++++++++++- 1 file changed, 131 insertions(+), 3 deletions(-)