[3/3] KVM: x86 emulator: fuzz tester

Message ID 1314020469-30882-4-git-send-email-avi@redhat.com (mailing list archive)
State New, archived

Commit Message

Avi Kivity Aug. 22, 2011, 1:41 p.m. UTC
The x86 emulator is directly exposed to guest code; therefore it is part
of the directly exposed attack surface.  To reduce the risk of
vulnerabilities, this patch adds a fuzz test that runs random instructions
through the emulator.  A vulnerability will usually result in an oops.

One way to run the test is via KVM itself:

  qemu -enable-kvm -smp 4 -serial stdio -kernel bzImage \
      -append 'console=ttyS0 test_emulator.iterations=1000000000'

This requires that the test code be built into the kernel (CONFIG_KVM_EMULATOR_TEST=y) rather than as a module.

Signed-off-by: Avi Kivity <avi@redhat.com>
---
 arch/x86/Kbuild              |    1 +
 arch/x86/kvm/Kconfig         |   11 +
 arch/x86/kvm/Makefile        |    1 +
 arch/x86/kvm/test-emulator.c |  533 ++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 546 insertions(+), 0 deletions(-)
 create mode 100644 arch/x86/kvm/test-emulator.c

Comments

Marcelo Tosatti Aug. 25, 2011, 4:27 p.m. UTC | #1
On Mon, Aug 22, 2011 at 04:41:09PM +0300, Avi Kivity wrote:
> The x86 emulator is directly exposed to guest code; therefore it is part
> of the directly exposed attack surface.  To reduce the risk of
> vulnerabilities, this patch adds a fuzz test that runs random instructions
> through the emulator.  A vulnerability will usually result in an oops.
> 
> One way to run the test is via KVM itself:
> 
>   qemu -enable-kvm -smp 4 -serial stdio -kernel bzImage \
>       -append 'console=ttyS0 test_emulator.iterations=1000000000'
> 
> this requires that the test module be built into the kernel.
> 
> Signed-off-by: Avi Kivity <avi@redhat.com>
> ---
>  arch/x86/Kbuild              |    1 +
>  arch/x86/kvm/Kconfig         |   11 +
>  arch/x86/kvm/Makefile        |    1 +
>  arch/x86/kvm/test-emulator.c |  533 ++++++++++++++++++++++++++++++++++++++++++
>  4 files changed, 546 insertions(+), 0 deletions(-)
>  create mode 100644 arch/x86/kvm/test-emulator.c
> 

> +	.fetch = test_fetch,
> +	.read_emulated = test_read,
> +	.write_emulated = test_write,
> +	.cmpxchg_emulated = test_cmpxchg,
> +	.invlpg = test_invlpg,
> +	.pio_in_emulated = test_pio_in,
> +	.pio_out_emulated = test_pio_out,
> +	.get_segment = test_get_segment,
> +	.set_segment = test_set_segment,
> +	.get_cached_segment_base = test_get_cached_segment_base,
> +	.get_gdt = test_get_desc_table,
> +	.get_idt = test_get_desc_table,
> +	.set_gdt = test_set_desc_table,
> +	.set_idt = test_set_desc_table,
> +	.get_cr = test_get_cr,
> +	.set_cr = test_set_cr,
> +	.cpl = test_cpl,
> +	.get_dr = test_get_dr,
> +	.set_dr = test_set_dr,
> +	.set_msr = test_set_msr,
> +	.get_msr = test_get_msr,
> +	.halt = test_halt,
> +	.wbinvd = test_wbinvd,
> +	.fix_hypercall = test_fix_hypercall,
> +	.get_fpu = test_get_fpu,
> +	.put_fpu = test_put_fpu,
> +	.intercept = test_intercept,
> +};
> +
> +static int modes[] = {
> +	X86EMUL_MODE_REAL,
> +	X86EMUL_MODE_VM86,
> +	X86EMUL_MODE_PROT16,
> +	X86EMUL_MODE_PROT32,
> +	X86EMUL_MODE_PROT64,
> +};
> +
> +static int test_emulator_one(struct test_context *test)
> +{
> +	struct x86_emulate_ctxt *ctxt = &test->ctxt;
> +	unsigned i;
> +	int r;
> +
> +	test->failed = false;
> +	i = 0;
> +	if (random32() & 1)
> +		test->insn[i++] = 0x0f;
> +	for (; i < 15; ++i)
> +		test->insn[i++] = random32();
> +	test->insn_base_valid = false;
> +	ctxt->ops = &test_ops;
> +	ctxt->eflags = randlong();
> +	ctxt->eip = randlong();
> +	ctxt->mode = modes[random32() % ARRAY_SIZE(modes)];
> +	ctxt->guest_mode = random32() % 16 == 0;
> +	ctxt->perm_ok = random32() % 16 == 0;
> +	ctxt->only_vendor_specific_insn = random32() % 64 == 0;
> +	memset(&ctxt->twobyte, 0,
> +	       (void *)&ctxt->regs - (void *)&ctxt->twobyte);
> +	for (i = 0; i < NR_VCPU_REGS; ++i)
> +		ctxt->regs[i] = randlong();
> +	r = x86_decode_insn(ctxt, NULL, 0);

It could rerun N times instructions that have been decoded successfully.
This would increase the chance of testing the code path for that (class
of) instruction.

Also fuzzing from an actual guest is useful to test the real backend
functions. What problem did you encounter? The new testsuite scheme
seems a good fit for that (with the exception of being locked to 32-bit
mode).
Avi Kivity Aug. 25, 2011, 7:04 p.m. UTC | #2
On 08/25/2011 07:27 PM, Marcelo Tosatti wrote:
> On Mon, Aug 22, 2011 at 04:41:09PM +0300, Avi Kivity wrote:
> >  The x86 emulator is directly exposed to guest code; therefore it is part
> >  of the directly exposed attack surface.  To reduce the risk of
> >  vulnerabilities, this patch adds a fuzz test that runs random instructions
> >  through the emulator.  A vulnerability will usually result in an oops.
> >
> >  +	for (i = 0; i < NR_VCPU_REGS; ++i)
> >  +		ctxt->regs[i] = randlong();
> >  +	r = x86_decode_insn(ctxt, NULL, 0);
>
> It could rerun N times instructions that have been decoded successfully.
> This would increase the chance of testing the code path for that (class
> of) instruction.

Good idea.  I'll keep N small (20?) so that we fuzz the decoder as well.
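
Roughly, an (untested) sketch of such a rerun loop, reusing the helpers already in 
test-emulator.c - the function name and the N_RERUNS value are placeholders, not 
part of the posted patch:

/* Placeholder sketch: re-run the same instruction bytes with fresh random
 * machine state; mirrors the per-iteration setup in test_emulator_one().
 * Bookkeeping is kept minimal here.
 */
#define N_RERUNS 20	/* small, so the decoder itself still gets fuzzed */

static void test_rerun_decoded(struct test_context *test)
{
	struct x86_emulate_ctxt *ctxt = &test->ctxt;
	unsigned i, n;

	for (n = 0; n < N_RERUNS && !test->failed; ++n) {
		test->insn_base_valid = false;
		ctxt->eflags = randlong();
		ctxt->eip = randlong();
		memset(&ctxt->twobyte, 0,
		       (void *)&ctxt->regs - (void *)&ctxt->twobyte);
		for (i = 0; i < NR_VCPU_REGS; ++i)
			ctxt->regs[i] = randlong();
		if (x86_decode_insn(ctxt, NULL, 0) == EMULATION_OK &&
		    x86_emulate_insn(ctxt) == EMULATION_OK)
			++test->emulated;
	}
}

test_emulator_one() could then call this whenever its own x86_decode_insn() 
returns EMULATION_OK.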

> Also fuzzing from an actual guest is useful to test the real backend
> functions. What problem did you encounter? The new testsuite scheme
> seems a good fit for that (with the exception of being locked to 32-bit
> mode).

Mostly that I forgot it exists.  Other issues are that it's harder to 
force random values through it - though I could allocate a couple GB and 
fill it with random values.  We also lose the ability to test inputs to 
callbacks (not that I do much of that here).

I'll try it out.
Lucas Meneghel Rodrigues Aug. 25, 2011, 10:17 p.m. UTC | #3
On 08/22/2011 10:41 AM, Avi Kivity wrote:
> The x86 emulator is directly exposed to guest code; therefore it is part
> of the directly exposed attack surface.  To reduce the risk of
> vulnerabilities, this patch adds a fuzz test that runs random instructions
> through the emulator.  A vulnerability will usually result in an oops.
>
> One way to run the test is via KVM itself:
>
>    qemu -enable-kvm -smp 4 -serial stdio -kernel bzImage \
>        -append 'console=ttyS0 test_emulator.iterations=1000000000'
>
> this requires that the test module be built into the kernel.
>
> Signed-off-by: Avi Kivity <avi@redhat.com>
> ---
>   arch/x86/Kbuild              |    1 +
>   arch/x86/kvm/Kconfig         |   11 +
>   arch/x86/kvm/Makefile        |    1 +
>   arch/x86/kvm/test-emulator.c |  533 ++++++++++++++++++++++++++++++++++++++++++
>   4 files changed, 546 insertions(+), 0 deletions(-)
>   create mode 100644 arch/x86/kvm/test-emulator.c
>
> diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
> index 0e9dec6..0d80e6f 100644
> --- a/arch/x86/Kbuild
> +++ b/arch/x86/Kbuild
> @@ -1,5 +1,6 @@
>
>   obj-$(CONFIG_KVM) += kvm/
> +obj-$(CONFIG_KVM_EMULATOR_TEST) += kvm/
>
>   # Xen paravirtualization support
>   obj-$(CONFIG_XEN) += xen/
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index ff5790d..9ffc30a 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -76,6 +76,17 @@ config KVM_MMU_AUDIT
>   	 This option adds a R/W kVM module parameter 'mmu_audit', which allows
>   	 audit  KVM MMU at runtime.
>
> +config KVM_EMULATOR_TEST
> +        tristate "KVM emulator self test"
> +	depends on KVM
> +	---help---
> +	 Build test code that checks the x86 emulator during boot or module
> +         insertion.  If built as a module, it will be called test-emulator.ko.
> +
> +	 The emulator test will run for as many iterations as are specified by
> +         the emulator_test.iterations parameter; all processors will be
> +         utilized.  When the test is complete, results are reported in dmesg.
> +
>   # OK, it's a little counter-intuitive to do this, but it puts it neatly under
>   # the virtualization menu.
>   source drivers/vhost/Kconfig
> diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
> index f15501f..fc4a9e2 100644
> --- a/arch/x86/kvm/Makefile
> +++ b/arch/x86/kvm/Makefile
> @@ -19,3 +19,4 @@ kvm-amd-y		+= svm.o
>   obj-$(CONFIG_KVM)	+= kvm.o
>   obj-$(CONFIG_KVM_INTEL)	+= kvm-intel.o
>   obj-$(CONFIG_KVM_AMD)	+= kvm-amd.o
> +obj-$(CONFIG_KVM_EMULATOR_TEST) += test-emulator.o
> diff --git a/arch/x86/kvm/test-emulator.c b/arch/x86/kvm/test-emulator.c
> new file mode 100644
> index 0000000..1e3a22f
> --- /dev/null
> +++ b/arch/x86/kvm/test-emulator.c
> @@ -0,0 +1,533 @@
> +/*
> + * x86 instruction emulator test
> + *
> + * Copyright 2011 Red Hat, Inc. and/or its affiliates.
> + *
> + * Authors:
> + *   Avi Kivity   <avi@redhat.com>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2.  See
> + * the COPYING file in the top-level directory.
> + *
> + */
> +
> +#include <linux/module.h>
> +#include <linux/kernel.h>
> +#include <linux/random.h>
> +#include <asm/kvm_host.h>
> +#include <asm/kvm_emulate.h>
> +#include <asm/i387.h>

I still haven't gone through all the code, but it's a good idea to put a 
MODULE_LICENSE("GPL") macro around here, so the build system doesn't 
complain about it:

WARNING: modpost: missing MODULE_LICENSE() in arch/x86/kvm/test-emulator.o
see include/linux/module.h for more information
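
For reference, the usual fix is a couple of module macros near the end of the 
file, along these lines (illustrative - only the MODULE_LICENSE("GPL") line is 
what the warning asks for; the other strings are guesses):

	MODULE_AUTHOR("Avi Kivity <avi@redhat.com>");
	MODULE_DESCRIPTION("KVM x86 emulator fuzz test");
	MODULE_LICENSE("GPL");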

> [...]

Avi Kivity Aug. 29, 2011, 12:01 p.m. UTC | #4
On 08/26/2011 01:17 AM, Lucas Meneghel Rodrigues wrote:
>
> I still haven't gone through all the code, but it's a good idea to put 
> a MODULE_LICENSE("GPL") macro around here, so the build system doesn't 
> complain about it:
>
> WARNING: modpost: missing MODULE_LICENSE() in 
> arch/x86/kvm/test-emulator.o
> see include/linux/module.h for more information
>

Thanks; fixed.
Avi Kivity Aug. 29, 2011, 12:42 p.m. UTC | #5
On 08/25/2011 10:04 PM, Avi Kivity wrote:
>
>> Also fuzzing from an actual guest is useful to test the real backend
>> functions. What problem did you encounter? The new testsuite scheme
>> seems a good fit for that (with the exception of being locked to 32-bit
>> mode).
>
> Mostly that I forgot it exists.  Other issues are that it's harder to 
> force random values through it - though I could allocate a couple GB 
> and fill it with random values.  We also lose the ability to test 
> inputs to callbacks (not that I do much of that here).

Further issues would be:

- much slower - a heavyweight exit on every insn, KVM_SET_SREGS, etc.
- need to set up the GDT/LDT; I guess we can do this once and fill it with 
random entries
- much more care in setting up registers so we can get a context that runs
- need to figure out where the %rip effective address is so we can put the 
insn there, and hope it doesn't conflict with other code

So it will probably work, but the result will be of lower quality.

Patch

diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 0e9dec6..0d80e6f 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -1,5 +1,6 @@ 
 
 obj-$(CONFIG_KVM) += kvm/
+obj-$(CONFIG_KVM_EMULATOR_TEST) += kvm/
 
 # Xen paravirtualization support
 obj-$(CONFIG_XEN) += xen/
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ff5790d..9ffc30a 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -76,6 +76,17 @@  config KVM_MMU_AUDIT
 	 This option adds a R/W kVM module parameter 'mmu_audit', which allows
 	 audit  KVM MMU at runtime.
 
+config KVM_EMULATOR_TEST
+        tristate "KVM emulator self test"
+	depends on KVM
+	---help---
+	 Build test code that checks the x86 emulator during boot or module
+         insertion.  If built as a module, it will be called test-emulator.ko.
+
+	 The emulator test will run for as many iterations as are specified by
+         the emulator_test.iterations parameter; all processors will be
+         utilized.  When the test is complete, results are reported in dmesg.
+
 # OK, it's a little counter-intuitive to do this, but it puts it neatly under
 # the virtualization menu.
 source drivers/vhost/Kconfig
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index f15501f..fc4a9e2 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -19,3 +19,4 @@  kvm-amd-y		+= svm.o
 obj-$(CONFIG_KVM)	+= kvm.o
 obj-$(CONFIG_KVM_INTEL)	+= kvm-intel.o
 obj-$(CONFIG_KVM_AMD)	+= kvm-amd.o
+obj-$(CONFIG_KVM_EMULATOR_TEST) += test-emulator.o
diff --git a/arch/x86/kvm/test-emulator.c b/arch/x86/kvm/test-emulator.c
new file mode 100644
index 0000000..1e3a22f
--- /dev/null
+++ b/arch/x86/kvm/test-emulator.c
@@ -0,0 +1,533 @@ 
+/*
+ * x86 instruction emulator test
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ *   Avi Kivity   <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/i387.h>
+
+static ulong iterations = 0;
+module_param(iterations, ulong, S_IRUGO);
+
+struct test_context {
+	struct work_struct work;
+	struct completion completion;
+	struct x86_emulate_ctxt ctxt;
+	struct test_context *next;
+	bool failed;
+	u8 insn[15];
+	bool insn_base_valid;
+	ulong insn_base;
+	struct test_seg {
+		u16 selector;
+		struct desc_struct desc;
+		u32 base3;
+		bool valid;
+	} segs[8];
+	ulong iterations;
+	ulong completed;
+	ulong decoded;
+	ulong emulated;
+	ulong nofault;
+	ulong failures;
+};
+
+static u64 random64(void)
+{
+	return random32() | ((u64)random32() << 32);
+}
+
+static ulong randlong(void)
+{
+	if (sizeof(ulong) == sizeof(u32))
+		return random32();
+	else
+		return random64();
+}
+
+static struct test_context *to_test(struct x86_emulate_ctxt *ctxt)
+{
+	return container_of(ctxt, struct test_context, ctxt);
+}
+
+static void fail(struct x86_emulate_ctxt *ctxt, const char *msg, ...)
+	__attribute__((format(printf, 2, 3)));
+
+static void fail(struct x86_emulate_ctxt *ctxt, const char *msg, ...)
+{
+	va_list args;
+	char s[200];
+
+	va_start(args, msg);
+	vsnprintf(s, sizeof(s), msg, args);
+	va_end(args);
+	printk("emulator test failure: %s\n", s);
+	to_test(ctxt)->failed = true;
+}
+
+static int test_fill_exception(struct x86_exception *ex)
+{
+	if (random32() % 4 == 0) {
+		if (ex) {
+			ex->vector = random32();
+			ex->error_code_valid = random32();
+			ex->error_code = random32();
+			ex->nested_page_fault = random32();
+			ex->address = random64();
+		}
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+	return X86EMUL_CONTINUE;
+}
+
+static int rand_error(void)
+{
+	switch (random32() % 8) {
+	case 0: return X86EMUL_UNHANDLEABLE;
+	case 1: return X86EMUL_IO_NEEDED;
+	default: return X86EMUL_CONTINUE;
+	}
+}
+
+static int test_read(struct x86_emulate_ctxt *ctxt,
+		     unsigned long addr, void *val,
+		     unsigned int bytes,
+		     struct x86_exception *fault)
+{
+	unsigned i;
+
+	if (bytes > 32 || bytes == 0)
+		fail(ctxt, "read %x bytes", bytes);
+
+	for (i = 0; i < bytes; ++i)
+		*(u8 *)(val + i) = random32();
+
+	return test_fill_exception(fault);
+}
+
+static int test_write(struct x86_emulate_ctxt *ctxt,
+		      unsigned long addr, const void *val, unsigned int bytes,
+		      struct x86_exception *fault)
+{
+	if (bytes > 32 || bytes == 0)
+		fail(ctxt, "write %x bytes", bytes);
+
+	return test_fill_exception(fault);
+}
+
+static int test_fetch(struct x86_emulate_ctxt *ctxt,
+		      unsigned long addr, void *val,
+		      unsigned int bytes,
+		      struct x86_exception *fault)
+{
+	struct test_context *test = to_test(ctxt);
+
+	if (bytes > 15 || bytes == 0)
+		fail(ctxt, "fetch %x bytes", bytes);
+
+	if (!test->insn_base_valid) {
+		test->insn_base_valid = true;
+		test->insn_base = addr;
+	}
+	addr -= test->insn_base;
+	if (addr >= 15 || addr + bytes > 15)
+		fail(ctxt, "fetch %x from %lx vs %lx",
+		     bytes, addr + test->insn_base, test->insn_base);
+	else
+		memcpy(val, test->insn + addr, bytes);
+
+	return test_fill_exception(fault);
+}
+
+static int test_cmpxchg(struct x86_emulate_ctxt *ctxt,
+			unsigned long addr,
+			const void *old,
+			const void *new,
+			unsigned int bytes,
+			struct x86_exception *fault)
+{
+	if (bytes > 16 || bytes == 0 || hweight32(bytes) != 1)
+		fail(ctxt, "cmpxchg %x bytes", bytes);
+
+	return test_fill_exception(fault);
+}
+
+static void test_invlpg(struct x86_emulate_ctxt *ctxt, ulong addr)
+{
+}
+
+static int test_pio_in(struct x86_emulate_ctxt *ctxt,
+		       int size, unsigned short port, void *val,
+		       unsigned int count)
+{
+	if ((size != 1 && size != 2 && size != 4)
+	    || (count == 0 || count * size > 4096))
+		fail(ctxt, "pio_in_emulated: size %x count %x\n", size, count);
+
+	return rand_error();
+}
+
+static int test_pio_out(struct x86_emulate_ctxt *ctxt,
+			int size, unsigned short port, const void *val,
+			unsigned int count)
+{
+	if ((size != 1 && size != 2 && size != 4)
+	    || (count == 0 || count * size > 4096))
+		fail(ctxt, "pio_out_emulated: size %x count %x\n", size, count);
+
+	return rand_error();
+}
+
+static bool test_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
+			     struct desc_struct *desc, u32 *base3, int seg)
+{
+	struct test_context *test = to_test(ctxt);
+	struct test_seg *s = &test->segs[seg];
+
+	if (seg < 0 || seg > 7)
+		fail(ctxt, "bad segment %d\n", seg);
+
+	if (!s->valid) {
+		s->valid = true;
+		s->selector = random32();
+		s->desc.a = random32();
+		s->desc.b = random32();
+		s->base3 = random32();
+	}
+
+	*selector = s->selector;
+	desc->a = s->desc.a;
+	desc->b = s->desc.b;
+	if (base3)
+		*base3 = s->base3;
+
+	return random32() & 1;
+}
+
+static void test_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
+			     struct desc_struct *desc, u32 base3, int seg)
+{
+	if (seg < 0 || seg > 5)
+		fail(ctxt, "bad segment %d\n", seg);
+}
+
+static ulong segment_base(struct x86_emulate_ctxt *ctxt,
+			  struct desc_struct *d, u32 base3)
+{
+	unsigned long v;
+
+	v = get_desc_base(d);
+	if (ctxt->mode == X86EMUL_MODE_PROT64
+	    && d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+		v |= (u64)base3 << 32;
+	return v;
+}
+
+static unsigned long test_get_cached_segment_base(struct x86_emulate_ctxt *ctxt,
+						  int seg)
+{
+	u16 selector;
+	struct desc_struct desc;
+	u32 base3;
+
+	test_get_segment(ctxt, &selector, &desc, &base3, seg);
+	return segment_base(ctxt, &desc, base3);
+}
+
+static void test_get_desc_table(struct x86_emulate_ctxt *ctxt,
+				struct desc_ptr *dt)
+{
+	dt->size = random32();
+	dt->address = randlong();
+}
+
+static void test_set_desc_table(struct x86_emulate_ctxt *ctxt,
+				struct desc_ptr *dt)
+{
+}
+
+static bool valid_cr[] = {
+	[0] = true, [2] = true, [3] = true, [4] = true, [8] = true,
+};
+
+static void check_cr(struct x86_emulate_ctxt *ctxt, int cr)
+{
+	if (cr < 0 || cr > ARRAY_SIZE(valid_cr) || !valid_cr[cr])
+		fail(ctxt, "bad cr %d\n", cr);
+}
+
+static ulong test_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
+{
+	check_cr(ctxt, cr);
+	return randlong();
+}
+
+static int test_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
+{
+	check_cr(ctxt, cr);
+	return random32() & 1;
+}
+
+static int test_cpl(struct x86_emulate_ctxt *ctxt)
+{
+	return random32() & 3;
+}
+
+static void check_dr(struct x86_emulate_ctxt *ctxt, int dr)
+{
+	if (dr < 0 || dr > 7)
+		fail(ctxt, "bad dr %d\n", dr);
+}
+
+static int test_get_dr(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest)
+{
+	check_dr(ctxt, dr);
+	*dest = randlong();
+	return random32() & 1;
+}
+
+static int test_set_dr(struct x86_emulate_ctxt *ctxt, int dr, ulong value)
+{
+	check_dr(ctxt, dr);
+	return random32() & 1;
+}
+
+static int test_set_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data)
+{
+	return random32() & 1;
+}
+
+static int test_get_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index,
+			u64 *pdata)
+{
+	*pdata = random64();
+	return random32() & 1;
+}
+
+static void test_halt(struct x86_emulate_ctxt *ctxt)
+{
+}
+
+static void test_wbinvd(struct x86_emulate_ctxt *ctxt)
+{
+}
+
+static int test_fix_hypercall(struct x86_emulate_ctxt *ctxt)
+{
+	return rand_error();
+}
+
+static void test_get_fpu(struct x86_emulate_ctxt *ctxt)
+{
+	kernel_fpu_begin();
+	/* FIXME: randomize state? */
+}
+
+static void test_put_fpu(struct x86_emulate_ctxt *ctxt)
+{
+	kernel_fpu_end();
+}
+
+static int test_intercept(struct x86_emulate_ctxt *ctxt,
+			  struct x86_instruction_info *info,
+			  enum x86_intercept_stage stage)
+{
+	return X86EMUL_CONTINUE;
+}
+
+static struct x86_emulate_ops test_ops = {
+	.read_std = test_read,
+	.write_std = test_write,
+	.fetch = test_fetch,
+	.read_emulated = test_read,
+	.write_emulated = test_write,
+	.cmpxchg_emulated = test_cmpxchg,
+	.invlpg = test_invlpg,
+	.pio_in_emulated = test_pio_in,
+	.pio_out_emulated = test_pio_out,
+	.get_segment = test_get_segment,
+	.set_segment = test_set_segment,
+	.get_cached_segment_base = test_get_cached_segment_base,
+	.get_gdt = test_get_desc_table,
+	.get_idt = test_get_desc_table,
+	.set_gdt = test_set_desc_table,
+	.set_idt = test_set_desc_table,
+	.get_cr = test_get_cr,
+	.set_cr = test_set_cr,
+	.cpl = test_cpl,
+	.get_dr = test_get_dr,
+	.set_dr = test_set_dr,
+	.set_msr = test_set_msr,
+	.get_msr = test_get_msr,
+	.halt = test_halt,
+	.wbinvd = test_wbinvd,
+	.fix_hypercall = test_fix_hypercall,
+	.get_fpu = test_get_fpu,
+	.put_fpu = test_put_fpu,
+	.intercept = test_intercept,
+};
+
+static int modes[] = {
+	X86EMUL_MODE_REAL,
+	X86EMUL_MODE_VM86,
+	X86EMUL_MODE_PROT16,
+	X86EMUL_MODE_PROT32,
+	X86EMUL_MODE_PROT64,
+};
+
+static int test_emulator_one(struct test_context *test)
+{
+	struct x86_emulate_ctxt *ctxt = &test->ctxt;
+	unsigned i;
+	int r;
+
+	test->failed = false;
+	i = 0;
+	if (random32() & 1)
+		test->insn[i++] = 0x0f;
+	for (; i < 15; ++i)
+		test->insn[i++] = random32();
+	test->insn_base_valid = false;
+	ctxt->ops = &test_ops;
+	ctxt->eflags = randlong();
+	ctxt->eip = randlong();
+	ctxt->mode = modes[random32() % ARRAY_SIZE(modes)];
+	ctxt->guest_mode = random32() % 16 == 0;
+	ctxt->perm_ok = random32() % 16 == 0;
+	ctxt->only_vendor_specific_insn = random32() % 64 == 0;
+	memset(&ctxt->twobyte, 0,
+	       (void *)&ctxt->regs - (void *)&ctxt->twobyte);
+	for (i = 0; i < NR_VCPU_REGS; ++i)
+		ctxt->regs[i] = randlong();
+	r = x86_decode_insn(ctxt, NULL, 0);
+	if (r == EMULATION_OK) {
+		++test->decoded;
+		r = x86_emulate_insn(ctxt);
+		if (r == EMULATION_OK) {
+			++test->emulated;
+			if (!ctxt->have_exception)
+				++test->nofault;
+		}
+	}
+
+	++test->completed;
+
+	return test->failed ? -EINVAL : 0;
+}
+
+static const char *regnames[] = {
+	[VCPU_REGS_RAX] = "rax",
+	[VCPU_REGS_RBX] = "rbx",
+	[VCPU_REGS_RCX] = "rcx",
+	[VCPU_REGS_RDX] = "rdx",
+	[VCPU_REGS_RSI] = "rsi",
+	[VCPU_REGS_RDI] = "rdi",
+	[VCPU_REGS_RSP] = "rsp",
+	[VCPU_REGS_RBP] = "rbp",
+	[VCPU_REGS_R8] = "r8",
+	[VCPU_REGS_R9] = "r9",
+	[VCPU_REGS_R10] = "r10",
+	[VCPU_REGS_R11] = "r11",
+	[VCPU_REGS_R12] = "r12",
+	[VCPU_REGS_R13] = "r13",
+	[VCPU_REGS_R14] = "r14",
+	[VCPU_REGS_R15] = "r15",
+	[VCPU_REGS_RIP] = "rip",
+};
+
+static void dump_test_context(struct test_context *test)
+{
+	unsigned i;
+
+	printk("instruction: %02x %02x %02x %02x %02x %02x %02x %02x"
+	       " %02x %02x %02x %02x %02x %02x %02x\n",
+	       test->insn[0], test->insn[1], test->insn[2], test->insn[3],
+	       test->insn[4], test->insn[5], test->insn[6], test->insn[7],
+	       test->insn[8], test->insn[9], test->insn[10], test->insn[11],
+	       test->insn[12], test->insn[13], test->insn[14]);
+	for (i = 0; i < NR_VCPU_REGS; ++i)
+		printk("  %s: %016llx\n", regnames[i], (u64)test->ctxt.regs[i]);
+}
+
+static void test_emulator_thread(struct work_struct *work)
+{
+	int i, ret;
+	struct test_context *test
+		= container_of(work, struct test_context, work);
+
+	for (i = 0, ret = 0; i < test->iterations && ret == 0; ++i) {
+		ret = test_emulator_one(test);
+		cond_resched();
+	}
+
+	if (ret) {
+		++test->failures;
+		printk("test failure in instruction %i\n", i);
+		dump_test_context(test);
+	}
+
+	complete(&test->completion);
+}
+
+static __init int test_emulator(void)
+{
+	int r, cpu, remain;
+	struct test_context *test = NULL, *tmp;
+	ulong completed = 0, decoded = 0, emulated = 0, nofault = 0;
+	ulong failures = 0;
+
+	if (!iterations)
+		return 0;
+
+	pr_info("starting emulator test\n");
+	remain = num_online_cpus();
+	for_each_online_cpu(cpu) {
+		tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+		r = -ENOMEM;
+		if (!tmp)
+			break;
+		tmp->next = test;
+		test = tmp;
+		test->iterations = iterations / remain--;
+		iterations -= test->iterations;
+		INIT_WORK(&test->work, test_emulator_thread);
+		init_completion(&test->completion);
+		schedule_work_on(cpu, &test->work);
+	}
+	while (test) {
+		wait_for_completion(&test->completion);
+		completed += test->completed;
+		decoded += test->decoded;
+		emulated += test->emulated;
+		nofault += test->nofault;
+		failures += test->failures;
+		tmp = test;
+		test = test->next;
+		kfree(tmp);
+	}
+	pr_info("emulator fuzz test results\n");
+	pr_info("  instructions:   %12ld\n", completed);
+	pr_info("  decoded:        %12ld\n", decoded);
+	pr_info("  emulated:       %12ld\n", emulated);
+	pr_info("  nofault:        %12ld\n", nofault);
+	pr_info("  failures:       %12ld\n", failures);
+	if (failures || remain)
+		pr_err("emulator test: FAIL\n");
+	else
+		pr_info("emulator test: PASS\n");
+	return 0;
+}
+
+module_init(test_emulator)