
[v3,07/10] selftests: kvm: s390: Add uc_map_unmap VM test case

Message ID 20240730072413.143556-8-schlameuss@linux.ibm.com (mailing list archive)
State New, archived
Series selftests: kvm: s390: Add s390x ucontrol selftests

Commit Message

Christoph Schlameuss July 30, 2024, 7:24 a.m. UTC
Add a test case verifying basic running and interaction of ucontrol VMs.
Fill the segment and page tables for allocated memory and map memory on
first access.

* uc_map_unmap
  Store and load data to mapped and unmapped memory and use pic segment
  translation handling to map memory on access.

Signed-off-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
---
 .../selftests/kvm/s390x/ucontrol_test.c       | 165 +++++++++++++++++-
 1 file changed, 164 insertions(+), 1 deletion(-)
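
The fault-driven mapping the test relies on boils down to the following
sketch (types, fields and constants as introduced by the patch below; error
handling omitted):

	/* on a ucontrol exit caused by a segment-translation exception, back
	 * the faulting guest segment with user memory and re-enter SIE */
	if (run->exit_reason == KVM_EXIT_S390_UCONTROL &&
	    run->s390_ucontrol.pgm_code == UC_PIC_SEGMENT_TRANSLATION) {
		struct kvm_s390_ucas_mapping map = {
			.user_addr = (u64)gpa2hva(self, run->s390_ucontrol.trans_exc_code),
			.vcpu_addr = run->s390_ucontrol.trans_exc_code,
			.length = VM_MEM_EXT_SIZE,
		};
		ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
	}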

Comments

Janosch Frank Aug. 1, 2024, 9:08 a.m. UTC | #1
On 7/30/24 9:24 AM, Christoph Schlameuss wrote:
> Add a test case verifying basic running and interaction of ucontrol VMs.
> Fill the segment and page tables for allocated memory and map memory on
> first access.
> 
> * uc_map_unmap
>    Store and load data to mapped and unmapped memory and use pic segment
>    translation handling to map memory on access.
> 
> Signed-off-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
> ---
>   .../selftests/kvm/s390x/ucontrol_test.c       | 165 +++++++++++++++++-
>   1 file changed, 164 insertions(+), 1 deletion(-)
> 
> diff --git a/tools/testing/selftests/kvm/s390x/ucontrol_test.c b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
> index 817b1e08559c..b7f760f980fd 100644
> --- a/tools/testing/selftests/kvm/s390x/ucontrol_test.c
> +++ b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
> @@ -16,7 +16,13 @@
>   #include <linux/capability.h>
>   #include <linux/sizes.h>
>   
> +#define UC_PIC_SEGMENT_TRANSLATION 0x10

That's a bit clearer and used by KVM:
#define PGM_SEGMENT_TRANSLATION		0x10

> +
>   #define VM_MEM_SIZE (4 * SZ_1M)
> +#define VM_MEM_EXT_SIZE (2 * SZ_1M)
> +#define VM_MEM_MAX (VM_MEM_SIZE + VM_MEM_EXT_SIZE)
> +

[...]

> +	self->pgd = self->base_gpa + SZ_1M; /* set PASCE addr */

Please put the comments on the line above

> +	phd = gpa2hva(self, self->pgd);
> +	memset(phd, 0xff, PAGES_PER_SEGMENT * PAGE_SIZE);
> +
> +	for (si = 0; si < ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M); si++) {
> +		/* create ste */
> +		phd[si] = (self->pgd
> +			+ (PAGES_PER_SEGMENT * PAGE_SIZE
> +				* ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M))
> +			+ (PAGES_PER_SEGMENT * PAGE_SIZE * si)) & ~0x7fful;
> +		se_addr = gpa2hva(self, phd[si]);
> +		memset(se_addr, 0xff, PAGES_PER_SEGMENT * PAGE_SIZE);
> +		for (pi = 0; pi < (SZ_1M / PAGE_SIZE); pi++) {
> +			/* create pte */
> +			((u64 *)se_addr)[pi] = (self->base_gpa
> +				+ (si * SZ_1M) + (pi * PAGE_SIZE)) & ~0xffful;
> +		}
> +	}
> +	pr_debug("segment table entry %p (0x%lx) --> %p\n",
> +		 phd, phd[0], gpa2hva(self, (phd[0] & ~0x7fful)));
> +	print_hex_bytes("st", (u64)phd, 64);
> +	print_hex_bytes("pt", (u64)gpa2hva(self, phd[0]), 128);
> +	print_hex_bytes("pt+", (u64)
> +			gpa2hva(self, phd[0] + (PAGES_PER_SEGMENT * PAGE_SIZE
> +			* ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M)) - 0x64), 128);
> +
> +	sync_regs->crs[1] = self->pgd | 0x3;	/* PASCE TT=00 for segment table */

Same here

> +	run->kvm_dirty_regs |= KVM_SYNC_CRS;
> +}
> +
> +static void uc_handle_exit_ucontrol(FIXTURE_DATA(uc_kvm) * self)
> +{
> +	struct kvm_run *run = self->run;
> +
> +	TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
> +	switch (run->s390_ucontrol.pgm_code) {
> +	case UC_PIC_SEGMENT_TRANSLATION:
> +		pr_info("ucontrol pic segment translation 0x%llx\n",
> +			run->s390_ucontrol.trans_exc_code);
> +		/* map / make additional memory available */
> +		struct kvm_s390_ucas_mapping map2 = {
> +			.user_addr = (u64)gpa2hva(self, run->s390_ucontrol.trans_exc_code),
> +			.vcpu_addr = run->s390_ucontrol.trans_exc_code,
> +			.length = VM_MEM_EXT_SIZE,
> +		};
> +		pr_info("ucas map %p %p 0x%llx\n",
> +			(void *)map2.user_addr, (void *)map2.vcpu_addr, map2.length);
> +		TEST_ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map2));
> +		break;
> +	default:
> +		TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code);
> +	}
> +}
> +
>   /* verify SIEIC exit
>    * * reset stop requests
>    * * fail on codes not expected in the test cases
> @@ -245,7 +338,11 @@ static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self)
>   		break;
>   	case ICPT_INST:
>   		/* end execution in caller on intercepted instruction */
> +		pr_info("sie instruction interception\n");

That should have been part of an earlier patch?

>   		return false;
> +	case ICPT_OPEREXC:
> +		/* operation exception */
> +		TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
>   	default:
>   		TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
>   	}
> @@ -258,6 +355,11 @@ static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) * self)
>   	struct kvm_run *run = self->run;
>   
>   	switch (run->exit_reason) {
> +	case KVM_EXIT_S390_UCONTROL:
> +		/* check program interruption code */
> +		/* handle page fault --> ucas map */

Multi-line comments do exist

> +		uc_handle_exit_ucontrol(self);
> +		break;
>   	case KVM_EXIT_S390_SIEIC:
>   		return uc_handle_sieic(self);
>   	default:
> @@ -289,6 +391,67 @@ static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) * self)
>   	TEST_ASSERT_EQ(0x440000, sie_block->ipb);
>   }
>   
> +TEST_F(uc_kvm, uc_map_unmap)
> +{
> +	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
> +	struct kvm_run *run = self->run;
> +	int rc;
> +
> +	init_st_pt(self);
> +
> +	/* copy test_mem_asm to code_hva / code_gpa */
> +	TH_LOG("copy code %p to vm mapped memory %p / %p",
> +	       &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
> +	memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);
> +
> +	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
> +	run->psw_addr = self->code_gpa;


Please put the comments on the line above

> +
> +	/* set register content for test_mem_asm to access unmapped memory */
> +	sync_regs->gprs[1] = 0x55;
> +	sync_regs->gprs[5] = self->base_gpa;
> +	sync_regs->gprs[6] = VM_MEM_SIZE;
> +	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
> +
> +	/* run and expect to fail with ucontrol pic segment translation */
> +	ASSERT_EQ(0, uc_run_once(self));
> +	ASSERT_EQ(1, sync_regs->gprs[0]);
> +	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
> +
> +	ASSERT_EQ(UC_PIC_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
> +	ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code);
> +	/* map / make additional memory available */
> +	struct kvm_s390_ucas_mapping map2 = {
> +		.user_addr = (u64)gpa2hva(self, self->base_gpa + VM_MEM_SIZE),
> +		.vcpu_addr = self->base_gpa + VM_MEM_SIZE,
> +		.length = VM_MEM_EXT_SIZE,
> +	};
> +	TH_LOG("ucas map %p %p 0x%llx",
> +	       (void *)map2.user_addr, (void *)map2.vcpu_addr, map2.length);
> +	rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map2);
> +	ASSERT_EQ(0, rc)
> +		TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
> +	ASSERT_EQ(0, uc_run_once(self));
> +	ASSERT_EQ(false, uc_handle_exit(self));
> +	uc_assert_diag44(self);
> +
> +	/* assert registers and memory are in expected state */
> +	ASSERT_EQ(2, sync_regs->gprs[0]);
> +	ASSERT_EQ(0x55, sync_regs->gprs[1]);
> +	ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE));
> +
> +	/* unmap and run loop again */
> +	TH_LOG("ucas unmap %p %p 0x%llx",
> +	       (void *)map2.user_addr, (void *)map2.vcpu_addr, map2.length);
> +	rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map2);
> +	ASSERT_EQ(0, rc)
> +		TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno));
> +	ASSERT_EQ(0, uc_run_once(self));
> +	ASSERT_EQ(3, sync_regs->gprs[0]);
> +	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
> +	ASSERT_EQ(true, uc_handle_exit(self));
> +}
> +
>   TEST_F(uc_kvm, uc_gprs)
>   {
>   	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
Christoph Schlameuss Aug. 1, 2024, 3:06 p.m. UTC | #2
On Thu, 1 Aug 2024 11:08:30 +0200
Janosch Frank <frankja@linux.ibm.com> wrote:

> On 7/30/24 9:24 AM, Christoph Schlameuss wrote:
> > Add a test case verifying basic running and interaction of ucontrol VMs.
> > Fill the segment and page tables for allocated memory and map memory on
> > first access.
> > 
> > * uc_map_unmap
> >    Store and load data to mapped and unmapped memory and use pic segment
> >    translation handling to map memory on access.
> > 
> > Signed-off-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
> > ---
> >   .../selftests/kvm/s390x/ucontrol_test.c       | 165 +++++++++++++++++-
> >   1 file changed, 164 insertions(+), 1 deletion(-)
> > 
> > diff --git a/tools/testing/selftests/kvm/s390x/ucontrol_test.c b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
> > index 817b1e08559c..b7f760f980fd 100644
> > --- a/tools/testing/selftests/kvm/s390x/ucontrol_test.c
> > +++ b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
> > @@ -16,7 +16,13 @@
> >   #include <linux/capability.h>
> >   #include <linux/sizes.h>
> >   
> > +#define UC_PIC_SEGMENT_TRANSLATION 0x10  
> 
> That's a bit clearer and used by KVM:
> #define PGM_SEGMENT_TRANSLATION		0x10
> 

I will rename the constant here. (The original constant is defined in
kvm_host.h, which is not pulled into the userspace selftests.)
Also, since it is only used here so far and does not really fit into
processor.h or sie.h, I would leave it here for now.
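
A minimal sketch of that rename, kept local to ucontrol_test.c as described
above (the value is unchanged):

	/* program interruption code for a segment-translation exception;
	 * mirrors the PGM_SEGMENT_TRANSLATION definition in kvm_host.h,
	 * which is not available to the userspace selftests */
	#define PGM_SEGMENT_TRANSLATION 0x10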

[...]

> > @@ -245,7 +338,11 @@ static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self)
> >   		break;
> >   	case ICPT_INST:
> >   		/* end execution in caller on intercepted instruction */
> > +		pr_info("sie instruction interception\n");  
> 
> That should have been part of an earlier patch?
> 

Yes, on closer observation this is actually already needed in patch 6:
"selftests: kvm: s390: Add VM run test case".

I will also make sure all patches run on their own again before
sending the next version.

Good catch, thank you.

[...]

I will also fix up the comments as advised.

Christoph
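
For reference when reading init_st_pt() in the patch below, the layout it
builds works out as follows (a sketch assuming base_gpa == 0 as set in the
fixture, PAGE_SIZE == 4 KiB and PAGES_PER_SEGMENT == 4, so pgd == 0x100000):

	/*
	 * 6 MiB of backing memory -> 6 segments of 1 MiB each
	 * segment table (pgd)      : 0x100000, one 8-byte STE per segment
	 * page table of segment si : 0x118000 + si * 0x4000
	 * PTE pi of segment si     : maps guest address si * SZ_1M + pi * PAGE_SIZE
	 * crs[1]                   : pgd | 0x3 (the PASCE), synced via KVM_SYNC_CRS
	 */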

Patch

diff --git a/tools/testing/selftests/kvm/s390x/ucontrol_test.c b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
index 817b1e08559c..b7f760f980fd 100644
--- a/tools/testing/selftests/kvm/s390x/ucontrol_test.c
+++ b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
@@ -16,7 +16,13 @@ 
 #include <linux/capability.h>
 #include <linux/sizes.h>
 
+#define UC_PIC_SEGMENT_TRANSLATION 0x10
+
 #define VM_MEM_SIZE (4 * SZ_1M)
+#define VM_MEM_EXT_SIZE (2 * SZ_1M)
+#define VM_MEM_MAX (VM_MEM_SIZE + VM_MEM_EXT_SIZE)
+
+#define PAGES_PER_SEGMENT 4
 
 /* so directly declare capget to check caps without libcap */
 int capget(cap_user_header_t header, cap_user_data_t data);
@@ -58,6 +64,23 @@  asm("test_gprs_asm:\n"
 	"	j	0b\n"
 );
 
+/* Test program manipulating memory */
+extern char test_mem_asm[];
+asm("test_mem_asm:\n"
+	"xgr	%r0, %r0\n"
+
+	"0:\n"
+	"	ahi	%r0,1\n"
+	"	st	%r1,0(%r5,%r6)\n"
+
+	"	xgr	%r1, %r1\n"
+	"	l	%r1,0(%r5,%r6)\n"
+	"	ahi	%r0,1\n"
+	"	diag	0,0,0x44\n"
+
+	"	j	0b\n"
+);
+
 FIXTURE(uc_kvm)
 {
 	struct kvm_s390_sie_block *sie_block;
@@ -67,6 +90,7 @@  FIXTURE(uc_kvm)
 	uintptr_t base_hva;
 	uintptr_t code_hva;
 	int kvm_run_size;
+	vm_paddr_t pgd;
 	void *vm_mem;
 	int vcpu_fd;
 	int kvm_fd;
@@ -116,7 +140,7 @@  FIXTURE_SETUP(uc_kvm)
 	self->base_gpa = 0;
 	self->code_gpa = self->base_gpa + (3 * SZ_1M);
 
-	self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_SIZE);
+	self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_SIZE + VM_MEM_EXT_SIZE);
 	ASSERT_NE(NULL, self->vm_mem) TH_LOG("malloc failed %u", errno);
 	self->base_hva = (uintptr_t)self->vm_mem;
 	self->code_hva = self->base_hva - self->base_gpa + self->code_gpa;
@@ -222,6 +246,75 @@  TEST(uc_cap_hpage)
 	close(kvm_fd);
 }
 
+/* calculate host virtual addr from guest physical addr */
+static void *gpa2hva(FIXTURE_DATA(uc_kvm) * self, u64 gpa)
+{
+	return (void *)(self->base_hva - self->base_gpa + gpa);
+}
+
+/* initialize segment and page tables for uc_kvm tests */
+static void init_st_pt(FIXTURE_DATA(uc_kvm) * self)
+{
+	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
+	struct kvm_run *run = self->run;
+	void *se_addr;
+	int si, pi;
+	u64 *phd;
+
+	self->pgd = self->base_gpa + SZ_1M; /* set PASCE addr */
+	phd = gpa2hva(self, self->pgd);
+	memset(phd, 0xff, PAGES_PER_SEGMENT * PAGE_SIZE);
+
+	for (si = 0; si < ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M); si++) {
+		/* create ste */
+		phd[si] = (self->pgd
+			+ (PAGES_PER_SEGMENT * PAGE_SIZE
+				* ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M))
+			+ (PAGES_PER_SEGMENT * PAGE_SIZE * si)) & ~0x7fful;
+		se_addr = gpa2hva(self, phd[si]);
+		memset(se_addr, 0xff, PAGES_PER_SEGMENT * PAGE_SIZE);
+		for (pi = 0; pi < (SZ_1M / PAGE_SIZE); pi++) {
+			/* create pte */
+			((u64 *)se_addr)[pi] = (self->base_gpa
+				+ (si * SZ_1M) + (pi * PAGE_SIZE)) & ~0xffful;
+		}
+	}
+	pr_debug("segment table entry %p (0x%lx) --> %p\n",
+		 phd, phd[0], gpa2hva(self, (phd[0] & ~0x7fful)));
+	print_hex_bytes("st", (u64)phd, 64);
+	print_hex_bytes("pt", (u64)gpa2hva(self, phd[0]), 128);
+	print_hex_bytes("pt+", (u64)
+			gpa2hva(self, phd[0] + (PAGES_PER_SEGMENT * PAGE_SIZE
+			* ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M)) - 0x64), 128);
+
+	sync_regs->crs[1] = self->pgd | 0x3;	/* PASCE TT=00 for segment table */
+	run->kvm_dirty_regs |= KVM_SYNC_CRS;
+}
+
+static void uc_handle_exit_ucontrol(FIXTURE_DATA(uc_kvm) * self)
+{
+	struct kvm_run *run = self->run;
+
+	TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
+	switch (run->s390_ucontrol.pgm_code) {
+	case UC_PIC_SEGMENT_TRANSLATION:
+		pr_info("ucontrol pic segment translation 0x%llx\n",
+			run->s390_ucontrol.trans_exc_code);
+		/* map / make additional memory available */
+		struct kvm_s390_ucas_mapping map2 = {
+			.user_addr = (u64)gpa2hva(self, run->s390_ucontrol.trans_exc_code),
+			.vcpu_addr = run->s390_ucontrol.trans_exc_code,
+			.length = VM_MEM_EXT_SIZE,
+		};
+		pr_info("ucas map %p %p 0x%llx\n",
+			(void *)map2.user_addr, (void *)map2.vcpu_addr, map2.length);
+		TEST_ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map2));
+		break;
+	default:
+		TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code);
+	}
+}
+
 /* verify SIEIC exit
  * * reset stop requests
  * * fail on codes not expected in the test cases
@@ -245,7 +338,11 @@  static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self)
 		break;
 	case ICPT_INST:
 		/* end execution in caller on intercepted instruction */
+		pr_info("sie instruction interception\n");
 		return false;
+	case ICPT_OPEREXC:
+		/* operation exception */
+		TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
 	default:
 		TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
 	}
@@ -258,6 +355,11 @@  static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) * self)
 	struct kvm_run *run = self->run;
 
 	switch (run->exit_reason) {
+	case KVM_EXIT_S390_UCONTROL:
+		/* check program interruption code */
+		/* handle page fault --> ucas map */
+		uc_handle_exit_ucontrol(self);
+		break;
 	case KVM_EXIT_S390_SIEIC:
 		return uc_handle_sieic(self);
 	default:
@@ -289,6 +391,67 @@  static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) * self)
 	TEST_ASSERT_EQ(0x440000, sie_block->ipb);
 }
 
+TEST_F(uc_kvm, uc_map_unmap)
+{
+	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
+	struct kvm_run *run = self->run;
+	int rc;
+
+	init_st_pt(self);
+
+	/* copy test_mem_asm to code_hva / code_gpa */
+	TH_LOG("copy code %p to vm mapped memory %p / %p",
+	       &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
+	memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);
+
+	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
+	run->psw_addr = self->code_gpa;
+
+	/* set register content for test_mem_asm to access unmapped memory */
+	sync_regs->gprs[1] = 0x55;
+	sync_regs->gprs[5] = self->base_gpa;
+	sync_regs->gprs[6] = VM_MEM_SIZE;
+	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
+
+	/* run and expect to fail with ucontrol pic segment translation */
+	ASSERT_EQ(0, uc_run_once(self));
+	ASSERT_EQ(1, sync_regs->gprs[0]);
+	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
+
+	ASSERT_EQ(UC_PIC_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
+	ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code);
+	/* map / make additional memory available */
+	struct kvm_s390_ucas_mapping map2 = {
+		.user_addr = (u64)gpa2hva(self, self->base_gpa + VM_MEM_SIZE),
+		.vcpu_addr = self->base_gpa + VM_MEM_SIZE,
+		.length = VM_MEM_EXT_SIZE,
+	};
+	TH_LOG("ucas map %p %p 0x%llx",
+	       (void *)map2.user_addr, (void *)map2.vcpu_addr, map2.length);
+	rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map2);
+	ASSERT_EQ(0, rc)
+		TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
+	ASSERT_EQ(0, uc_run_once(self));
+	ASSERT_EQ(false, uc_handle_exit(self));
+	uc_assert_diag44(self);
+
+	/* assert registers and memory are in expected state */
+	ASSERT_EQ(2, sync_regs->gprs[0]);
+	ASSERT_EQ(0x55, sync_regs->gprs[1]);
+	ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE));
+
+	/* unmap and run loop again */
+	TH_LOG("ucas unmap %p %p 0x%llx",
+	       (void *)map2.user_addr, (void *)map2.vcpu_addr, map2.length);
+	rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map2);
+	ASSERT_EQ(0, rc)
+		TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno));
+	ASSERT_EQ(0, uc_run_once(self));
+	ASSERT_EQ(3, sync_regs->gprs[0]);
+	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
+	ASSERT_EQ(true, uc_handle_exit(self));
+}
+
 TEST_F(uc_kvm, uc_gprs)
 {
 	struct kvm_sync_regs *sync_regs = &self->run->s.regs;