diff mbox series

[7/7] KVM: selftests: Add "delete" testcase to set_memory_region_test

Message ID 20200320205546.2396-8-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Headers show
Series KVM: Fix memslot use-after-free bug | expand

Commit Message

Sean Christopherson March 20, 2020, 8:55 p.m. UTC
Add coverage for running a guest with no memslots, and for deleting
memslots while the guest is running.  Enhance the test to use, and
expect, a unique value for MMIO reads, e.g. to verify each stage of
the test.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 .../kvm/x86_64/set_memory_region_test.c       | 122 ++++++++++++++++--
 1 file changed, 108 insertions(+), 14 deletions(-)

Comments

Peter Xu March 23, 2020, 7:06 p.m. UTC | #1
On Fri, Mar 20, 2020 at 01:55:46PM -0700, Sean Christopherson wrote:
> Add coverate for running a guest with no memslots, and for deleting
> memslots while the guest is running.  Enhance the test to use, and
> expect, a unique value for MMIO reads, e.g. to verify each stage of
> the test.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  .../kvm/x86_64/set_memory_region_test.c       | 122 ++++++++++++++++--
>  1 file changed, 108 insertions(+), 14 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c b/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
> index c6691cff4e19..44aed8ac932b 100644
> --- a/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
> @@ -26,42 +26,109 @@
>  #define MEM_REGION_SIZE		0x200000
>  #define MEM_REGION_SLOT		10
>  
> -static void guest_code(void)
> +static const uint64_t MMIO_VAL = 0xbeefull;
> +
> +extern const uint64_t final_rip_start;
> +extern const uint64_t final_rip_end;
> +
> +static inline uint64_t guest_spin_on_val(uint64_t spin_val)
>  {
>  	uint64_t val;
>  
>  	do {
>  		val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
> -	} while (!val);
> +	} while (val == spin_val);
> +	return val;
> +}
>  
> -	if (val != 1)
> -		ucall(UCALL_ABORT, 1, val);
> +static void guest_code(void)
> +{
> +	uint64_t val;
>  
> -	GUEST_DONE();
> +	/*
> +	 * Spin until the memory region is moved to a misaligned address.  This
> +	 * may or may not trigger MMIO, as the window where the memslot is
> +	 * invalid is quite small.
> +	 */
> +	val = guest_spin_on_val(0);
> +	GUEST_ASSERT(val == 1 || val == MMIO_VAL);
> +
> +	/* Spin until the memory region is realigned. */
> +	GUEST_ASSERT(guest_spin_on_val(MMIO_VAL) == 1);

IIUC ideally we should do GUEST_SYNC() after each GUEST_ASSERT() to
make sure the two threads are in sync.  Otherwise e.g. there's no
guarantee that the main thread won't run too fast to quickly remove
the memslot and re-add it back before the guest_spin_on_val() starts
above, then the assert could trigger when it reads the value as zero.

> +
> +	/* Spin until the memory region is deleted. */
> +	GUEST_ASSERT(guest_spin_on_val(1) == MMIO_VAL);
> +
> +	/* Spin until the memory region is recreated. */
> +	GUEST_ASSERT(guest_spin_on_val(MMIO_VAL) == 0);
> +
> +	/* Spin until the memory region is deleted. */
> +	GUEST_ASSERT(guest_spin_on_val(0) == MMIO_VAL);
> +
> +	asm("1:\n\t"
> +	    ".pushsection .rodata\n\t"
> +	    ".global final_rip_start\n\t"
> +	    "final_rip_start: .quad 1b\n\t"
> +	    ".popsection");
> +
> +	/* Spin indefinitely (until the code memslot is deleted). */
> +	guest_spin_on_val(MMIO_VAL);
> +
> +	asm("1:\n\t"
> +	    ".pushsection .rodata\n\t"
> +	    ".global final_rip_end\n\t"
> +	    "final_rip_end: .quad 1b\n\t"
> +	    ".popsection");
> +
> +	GUEST_ASSERT(0);
>  }
>  
>  static void *vcpu_worker(void *data)
>  {
>  	struct kvm_vm *vm = data;
> +	struct kvm_regs regs;
>  	struct kvm_run *run;
>  	struct ucall uc;
> -	uint64_t cmd;
>  
>  	/*
>  	 * Loop until the guest is done.  Re-enter the guest on all MMIO exits,
> -	 * which will occur if the guest attempts to access a memslot while it
> -	 * is being moved.
> +	 * which will occur if the guest attempts to access a memslot after it
> +	 * has been deleted or while it is being moved .
>  	 */
>  	run = vcpu_state(vm, VCPU_ID);
> -	do {
> +
> +	memcpy(run->mmio.data, &MMIO_VAL, 8);
> +	while (1) {
>  		vcpu_run(vm, VCPU_ID);
> -	} while (run->exit_reason == KVM_EXIT_MMIO);
> +		if (run->exit_reason != KVM_EXIT_MMIO)
> +			break;
>  
> -	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
> +		TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
> +		TEST_ASSERT(run->mmio.len == 8,
> +			    "Unexpected exit mmio size = %u", run->mmio.len);
> +
> +		TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
> +			    "Unexpected exit mmio address = 0x%llx",
> +			    run->mmio.phys_addr);
> +	}
> +
> +	if (run->exit_reason == KVM_EXIT_IO) {
> +		(void)get_ucall(vm, VCPU_ID, &uc);
> +		TEST_FAIL("%s at %s:%ld",
> +			  (const char *)uc.args[0], __FILE__, uc.args[1]);
> +	}
> +
> +	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
> +		    run->exit_reason == KVM_INTERNAL_ERROR_EMULATION,
>  		    "Unexpected exit reason = %d", run->exit_reason);
>  
> -	cmd = get_ucall(vm, VCPU_ID, &uc);
> -	TEST_ASSERT(cmd == UCALL_DONE, "Unexpected val in guest = %lu", uc.args[0]);
> +	vcpu_regs_get(vm, VCPU_ID, &regs);
> +
> +	TEST_ASSERT(regs.rip >= final_rip_start &&
> +		    regs.rip < final_rip_end,
> +		    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
> +		    final_rip_start, final_rip_end, regs.rip);
> +
>  	return NULL;
>  }
>  
> @@ -72,6 +139,13 @@ static void test_move_memory_region(void)
>  	uint64_t *hva;
>  	uint64_t gpa;
>  
> +	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
> +	vm_vcpu_add(vm, VCPU_ID);
> +	/* Fails with ENOSPC because the MMU can't create pages (no slots). */
> +	TEST_ASSERT(_vcpu_run(vm, VCPU_ID) == -1 && errno == ENOSPC,
> +		    "Unexpected error code = %d", errno);
> +	kvm_vm_free(vm);
> +
>  	vm = vm_create_default(VCPU_ID, 0, guest_code);
>  
>  	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
> @@ -105,7 +179,6 @@ static void test_move_memory_region(void)
>  	 */
>  	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
>  	WRITE_ONCE(*hva, 2);
> -
>  	usleep(100000);
>  
>  	/*
> @@ -116,6 +189,27 @@ static void test_move_memory_region(void)
>  
>  	/* Restore the original base, the guest should see "1". */
>  	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
> +	usleep(100000);
> +
> +	/* Delete the memory region, the guest should not die. */
> +	vm_mem_region_delete(vm, MEM_REGION_SLOT);
> +	usleep(100000);
> +
> +	/* Recreate the memory region.  The guest should see "0". */
> +	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
> +				    MEM_REGION_GPA, MEM_REGION_SLOT,
> +				    MEM_REGION_SIZE / getpagesize(), 0);
> +	usleep(100000);
> +
> +	/* Delete the region again so that there's only one memslot left. */
> +	vm_mem_region_delete(vm, MEM_REGION_SLOT);
> +	usleep(100000);
> +
> +	/*
> +	 * Delete the primary memslot.  This should cause an emulation error or
> +	 * shutdown due to the page tables getting nuked.
> +	 */
> +	vm_mem_region_delete(vm, VM_PRIMARY_MEM_SLOT);
>  
>  	pthread_join(vcpu_thread, NULL);
>  
> -- 
> 2.24.1
>
Sean Christopherson March 23, 2020, 9:43 p.m. UTC | #2
On Mon, Mar 23, 2020 at 03:06:36PM -0400, Peter Xu wrote:
> On Fri, Mar 20, 2020 at 01:55:46PM -0700, Sean Christopherson wrote:
> > +	/*
> > +	 * Spin until the memory region is moved to a misaligned address.  This
> > +	 * may or may not trigger MMIO, as the window where the memslot is
> > +	 * invalid is quite small.
> > +	 */
> > +	val = guest_spin_on_val(0);
> > +	GUEST_ASSERT(val == 1 || val == MMIO_VAL);
> > +
> > +	/* Spin until the memory region is realigned. */
> > +	GUEST_ASSERT(guest_spin_on_val(MMIO_VAL) == 1);
> 
> IIUC ideally we should do GUEST_SYNC() after each GUEST_ASSERT() to
> make sure the two threads are in sync.  Otherwise e.g. there's no
> guarantee that the main thread won't run too fast to quickly remove
> the memslot and re-add it back before the guest_spin_on_val() starts
> above, then the assert could trigger when it reads the value as zero.

Hrm, I was thinking ucall wasn't available across pthreads, but it's just
dumped into a global variable.  I'll rework this to replace the udelay()
hacks with proper synchronization.
Peter Xu March 23, 2020, 9:58 p.m. UTC | #3
On Mon, Mar 23, 2020 at 02:43:18PM -0700, Sean Christopherson wrote:
> On Mon, Mar 23, 2020 at 03:06:36PM -0400, Peter Xu wrote:
> > On Fri, Mar 20, 2020 at 01:55:46PM -0700, Sean Christopherson wrote:
> > > +	/*
> > > +	 * Spin until the memory region is moved to a misaligned address.  This
> > > +	 * may or may not trigger MMIO, as the window where the memslot is
> > > +	 * invalid is quite small.
> > > +	 */
> > > +	val = guest_spin_on_val(0);
> > > +	GUEST_ASSERT(val == 1 || val == MMIO_VAL);
> > > +
> > > +	/* Spin until the memory region is realigned. */
> > > +	GUEST_ASSERT(guest_spin_on_val(MMIO_VAL) == 1);
> > 
> > IIUC ideally we should do GUEST_SYNC() after each GUEST_ASSERT() to
> > make sure the two threads are in sync.  Otherwise e.g. there's no
> > guarantee that the main thread won't run too fast to quickly remove
> > the memslot and re-add it back before the guest_spin_on_val() starts
> > above, then the assert could trigger when it reads the value as zero.
> 
> Hrm, I was thinking ucall wasn't available across pthreads, but it's just
> dumped into a global variable.  I'll rework this to replace the udelay()
> hacks with proper synchronization.

I think ucall should work for pthread (shared address space of either
kvm_run or guest memories), however my thought was even simpler than
that, something like:

  - in guest code: do GUEST_SYNC after each GUEST_ASSERT
  - introduce a global_sem
  - in vcpu thread: when receive GUEST_SYNC, do "sem_post(&global_sem)"
  - in main thread: replace all usleep() with "sem_wait(&global_sem)"
diff mbox series

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c b/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
index c6691cff4e19..44aed8ac932b 100644
--- a/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
@@ -26,42 +26,109 @@ 
 #define MEM_REGION_SIZE		0x200000
 #define MEM_REGION_SLOT		10
 
-static void guest_code(void)
+static const uint64_t MMIO_VAL = 0xbeefull;
+
+extern const uint64_t final_rip_start;
+extern const uint64_t final_rip_end;
+
+static inline uint64_t guest_spin_on_val(uint64_t spin_val)
 {
 	uint64_t val;
 
 	do {
 		val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
-	} while (!val);
+	} while (val == spin_val);
+	return val;
+}
 
-	if (val != 1)
-		ucall(UCALL_ABORT, 1, val);
+static void guest_code(void)
+{
+	uint64_t val;
 
-	GUEST_DONE();
+	/*
+	 * Spin until the memory region is moved to a misaligned address.  This
+	 * may or may not trigger MMIO, as the window where the memslot is
+	 * invalid is quite small.
+	 */
+	val = guest_spin_on_val(0);
+	GUEST_ASSERT(val == 1 || val == MMIO_VAL);
+
+	/* Spin until the memory region is realigned. */
+	GUEST_ASSERT(guest_spin_on_val(MMIO_VAL) == 1);
+
+	/* Spin until the memory region is deleted. */
+	GUEST_ASSERT(guest_spin_on_val(1) == MMIO_VAL);
+
+	/* Spin until the memory region is recreated. */
+	GUEST_ASSERT(guest_spin_on_val(MMIO_VAL) == 0);
+
+	/* Spin until the memory region is deleted. */
+	GUEST_ASSERT(guest_spin_on_val(0) == MMIO_VAL);
+
+	asm("1:\n\t"
+	    ".pushsection .rodata\n\t"
+	    ".global final_rip_start\n\t"
+	    "final_rip_start: .quad 1b\n\t"
+	    ".popsection");
+
+	/* Spin indefinitely (until the code memslot is deleted). */
+	guest_spin_on_val(MMIO_VAL);
+
+	asm("1:\n\t"
+	    ".pushsection .rodata\n\t"
+	    ".global final_rip_end\n\t"
+	    "final_rip_end: .quad 1b\n\t"
+	    ".popsection");
+
+	GUEST_ASSERT(0);
 }
 
 static void *vcpu_worker(void *data)
 {
 	struct kvm_vm *vm = data;
+	struct kvm_regs regs;
 	struct kvm_run *run;
 	struct ucall uc;
-	uint64_t cmd;
 
 	/*
 	 * Loop until the guest is done.  Re-enter the guest on all MMIO exits,
-	 * which will occur if the guest attempts to access a memslot while it
-	 * is being moved.
+	 * which will occur if the guest attempts to access a memslot after it
+	 * has been deleted or while it is being moved .
 	 */
 	run = vcpu_state(vm, VCPU_ID);
-	do {
+
+	memcpy(run->mmio.data, &MMIO_VAL, 8);
+	while (1) {
 		vcpu_run(vm, VCPU_ID);
-	} while (run->exit_reason == KVM_EXIT_MMIO);
+		if (run->exit_reason != KVM_EXIT_MMIO)
+			break;
 
-	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+		TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
+		TEST_ASSERT(run->mmio.len == 8,
+			    "Unexpected exit mmio size = %u", run->mmio.len);
+
+		TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
+			    "Unexpected exit mmio address = 0x%llx",
+			    run->mmio.phys_addr);
+	}
+
+	if (run->exit_reason == KVM_EXIT_IO) {
+		(void)get_ucall(vm, VCPU_ID, &uc);
+		TEST_FAIL("%s at %s:%ld",
+			  (const char *)uc.args[0], __FILE__, uc.args[1]);
+	}
+
+	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
+		    run->exit_reason == KVM_INTERNAL_ERROR_EMULATION,
 		    "Unexpected exit reason = %d", run->exit_reason);
 
-	cmd = get_ucall(vm, VCPU_ID, &uc);
-	TEST_ASSERT(cmd == UCALL_DONE, "Unexpected val in guest = %lu", uc.args[0]);
+	vcpu_regs_get(vm, VCPU_ID, &regs);
+
+	TEST_ASSERT(regs.rip >= final_rip_start &&
+		    regs.rip < final_rip_end,
+		    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
+		    final_rip_start, final_rip_end, regs.rip);
+
 	return NULL;
 }
 
@@ -72,6 +139,13 @@  static void test_move_memory_region(void)
 	uint64_t *hva;
 	uint64_t gpa;
 
+	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+	vm_vcpu_add(vm, VCPU_ID);
+	/* Fails with ENOSPC because the MMU can't create pages (no slots). */
+	TEST_ASSERT(_vcpu_run(vm, VCPU_ID) == -1 && errno == ENOSPC,
+		    "Unexpected error code = %d", errno);
+	kvm_vm_free(vm);
+
 	vm = vm_create_default(VCPU_ID, 0, guest_code);
 
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
@@ -105,7 +179,6 @@  static void test_move_memory_region(void)
 	 */
 	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
 	WRITE_ONCE(*hva, 2);
-
 	usleep(100000);
 
 	/*
@@ -116,6 +189,27 @@  static void test_move_memory_region(void)
 
 	/* Restore the original base, the guest should see "1". */
 	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
+	usleep(100000);
+
+	/* Delete the memory region, the guest should not die. */
+	vm_mem_region_delete(vm, MEM_REGION_SLOT);
+	usleep(100000);
+
+	/* Recreate the memory region.  The guest should see "0". */
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
+				    MEM_REGION_GPA, MEM_REGION_SLOT,
+				    MEM_REGION_SIZE / getpagesize(), 0);
+	usleep(100000);
+
+	/* Delete the region again so that there's only one memslot left. */
+	vm_mem_region_delete(vm, MEM_REGION_SLOT);
+	usleep(100000);
+
+	/*
+	 * Delete the primary memslot.  This should cause an emulation error or
+	 * shutdown due to the page tables getting nuked.
+	 */
+	vm_mem_region_delete(vm, VM_PRIMARY_MEM_SLOT);
 
 	pthread_join(vcpu_thread, NULL);