Message ID | 1376409368-7016-3-git-send-email-yzt356@gmail.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 2013-08-13 17:56, Arthur Chunqi Li wrote: > Add testing for CR0/4 shadowing. A few sentences on the test strategy would be good. > > Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com> > --- > lib/x86/vm.h | 4 + > x86/vmx_tests.c | 218 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ > 2 files changed, 222 insertions(+) > > diff --git a/lib/x86/vm.h b/lib/x86/vm.h > index eff6f72..6e0ce2b 100644 > --- a/lib/x86/vm.h > +++ b/lib/x86/vm.h > @@ -17,9 +17,13 @@ > #define PTE_ADDR (0xffffffffff000ull) > > #define X86_CR0_PE 0x00000001 > +#define X86_CR0_MP 0x00000002 > +#define X86_CR0_TS 0x00000008 > #define X86_CR0_WP 0x00010000 > #define X86_CR0_PG 0x80000000 > #define X86_CR4_VMXE 0x00000001 > +#define X86_CR4_TSD 0x00000004 > +#define X86_CR4_DE 0x00000008 > #define X86_CR4_PSE 0x00000010 > #define X86_CR4_PAE 0x00000020 > #define X86_CR4_PCIDE 0x00020000 > diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c > index 61b0cef..44be3f4 100644 > --- a/x86/vmx_tests.c > +++ b/x86/vmx_tests.c > @@ -5,12 +5,18 @@ > > u64 ia32_pat; > u64 ia32_efer; > +u32 stage; > > static inline void vmcall() > { > asm volatile("vmcall"); > } > > +static inline void set_stage(u32 s) > +{ > + asm volatile("mov %0, stage\n\t"::"r"(s):"memory", "cc"); > +} > + Why do we need "state = s" as assembler instruction? 
> void basic_init() > { > } > @@ -257,6 +263,216 @@ static int test_ctrl_efer_exit_handler() > return VMX_TEST_VMEXIT; > } > > +u32 guest_cr0, guest_cr4; > + > +static void cr_shadowing_main() > +{ > + u32 cr0, cr4, tmp; > + > + // Test read through > + set_stage(0); > + guest_cr0 = read_cr0(); > + if (stage == 1) > + report("Read through CR0", 0); > + else > + vmcall(); > + set_stage(1); > + guest_cr4 = read_cr4(); > + if (stage == 2) > + report("Read through CR4", 0); > + else > + vmcall(); > + // Test write through > + guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP); > + guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE); > + set_stage(2); > + write_cr0(guest_cr0); > + if (stage == 3) > + report("Write throuth CR0", 0); > + else > + vmcall(); > + set_stage(3); > + write_cr4(guest_cr4); > + if (stage == 4) > + report("Write through CR4", 0); > + else > + vmcall(); > + // Test read shadow > + set_stage(4); > + vmcall(); > + cr0 = read_cr0(); > + if (stage != 5) { > + if (cr0 == guest_cr0) > + report("Read shadowing CR0", 1); > + else > + report("Read shadowing CR0", 0); > + } > + set_stage(5); > + cr4 = read_cr4(); > + if (stage != 6) { > + if (cr4 == guest_cr4) > + report("Read shadowing CR4", 1); > + else > + report("Read shadowing CR4", 0); > + } > + // Test write shadow (same value with shadow) > + set_stage(6); > + write_cr0(guest_cr0); > + if (stage == 7) > + report("Write shadowing CR0 (same value with shadow)", 0); > + else > + vmcall(); > + set_stage(7); > + write_cr4(guest_cr4); > + if (stage == 8) > + report("Write shadowing CR4 (same value with shadow)", 0); > + else > + vmcall(); > + // Test write shadow (different value) > + set_stage(8); > + tmp = guest_cr0 ^ X86_CR0_TS; > + asm volatile("mov %0, %%rsi\n\t" > + "mov %%rsi, %%cr0\n\t" > + ::"m"(tmp) > + :"rsi", "memory", "cc"); > + if (stage != 9) > + report("Write shadowing different X86_CR0_TS", 0); > + else > + report("Write shadowing different X86_CR0_TS", 1); > + set_stage(9); > + tmp = 
guest_cr0 ^ X86_CR0_MP; > + asm volatile("mov %0, %%rsi\n\t" > + "mov %%rsi, %%cr0\n\t" > + ::"m"(tmp) > + :"rsi", "memory", "cc"); > + if (stage != 10) > + report("Write shadowing different X86_CR0_MP", 0); > + else > + report("Write shadowing different X86_CR0_MP", 1); > + set_stage(10); > + tmp = guest_cr4 ^ X86_CR4_TSD; > + asm volatile("mov %0, %%rsi\n\t" > + "mov %%rsi, %%cr4\n\t" > + ::"m"(tmp) > + :"rsi", "memory", "cc"); > + if (stage != 11) > + report("Write shadowing different X86_CR4_TSD", 0); > + else > + report("Write shadowing different X86_CR4_TSD", 1); > + set_stage(11); > + tmp = guest_cr4 ^ X86_CR4_DE; > + asm volatile("mov %0, %%rsi\n\t" > + "mov %%rsi, %%cr4\n\t" > + ::"m"(tmp) > + :"rsi", "memory", "cc"); > + if (stage != 12) > + report("Write shadowing different X86_CR4_DE", 0); > + else > + report("Write shadowing different X86_CR4_DE", 1); > +} > + > +static int cr_shadowing_exit_handler() > +{ > + u64 guest_rip; > + ulong reason; > + u32 insn_len; > + u32 exit_qual; > + > + guest_rip = vmcs_read(GUEST_RIP); > + reason = vmcs_read(EXI_REASON) & 0xff; > + insn_len = vmcs_read(EXI_INST_LEN); > + exit_qual = vmcs_read(EXI_QUALIFICATION); > + switch (reason) { > + case VMX_VMCALL: > + switch (stage) { > + case 0: > + if (guest_cr0 == vmcs_read(GUEST_CR0)) > + report("Read through CR0", 1); > + else > + report("Read through CR0", 0); > + break; > + case 1: > + if (guest_cr4 == vmcs_read(GUEST_CR4)) > + report("Read through CR4", 1); > + else > + report("Read through CR4", 0); > + break; > + case 2: > + if (guest_cr0 == vmcs_read(GUEST_CR0)) > + report("Write through CR0", 1); > + else > + report("Write through CR0", 0); > + break; > + case 3: > + if (guest_cr4 == vmcs_read(GUEST_CR4)) > + report("Write through CR4", 1); > + else > + report("Write through CR4", 0); > + break; > + case 4: > + guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP); > + guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE); > + vmcs_write(CR0_MASK, 
X86_CR0_TS | X86_CR0_MP); > + vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP)); > + vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE); > + vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE)); > + break; > + case 6: > + if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP))) > + report("Write shadowing CR0 (same value)", 1); > + else > + report("Write shadowing CR0 (same value)", 0); > + break; > + case 7: > + if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE))) > + report("Write shadowing CR4 (same value)", 1); > + else > + report("Write shadowing CR4 (same value)", 0); > + break; > + default: report("something went wrong", 0); ??? > + break; > + } > + vmcs_write(GUEST_RIP, guest_rip + insn_len); > + return VMX_TEST_RESUME; > + case VMX_CR: > + switch (stage) { > + case 4: > + report("Read shadowing CR0", 0); > + set_stage(stage + 1); > + break; > + case 5: > + report("Read shadowing CR4", 0); > + set_stage(stage + 1); > + break; > + case 6: > + report("Write shadowing CR0 (same value)", 0); > + set_stage(stage + 1); > + break; > + case 7: > + report("Write shadowing CR4 (same value)", 0); > + set_stage(stage + 1); > + break; > + case 8: > + case 9: > + if (exit_qual == 0x600) > + set_stage(stage + 1); What is this qualification about? Yes, ESI to CR0, but that takes a manual to find out. And what if exit_qual is something else? Is that a test error? > + break; > + case 10: > + case 11: > + if (exit_qual == 0x604) > + set_stage(stage + 1); Same as above. > + default: ...? 
> + break; > + } > + vmcs_write(GUEST_RIP, guest_rip + insn_len); > + return VMX_TEST_RESUME; > + default: > + printf("Unknown exit reason, %d\n", reason); > + print_vmexit_info(); > + } > + return VMX_TEST_VMEXIT; > +} > + > /* name/init/guest_main/exit_handler/syscall_handler/guest_regs > basic_* just implement some basic functions */ > struct vmx_test vmx_tests[] = { > @@ -268,5 +484,7 @@ struct vmx_test vmx_tests[] = { > test_ctrl_pat_exit_handler, basic_syscall_handler, {0} }, > { "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main, > test_ctrl_efer_exit_handler, basic_syscall_handler, {0} }, > + { "CR shadowing", basic_init, cr_shadowing_main, > + cr_shadowing_exit_handler, basic_syscall_handler, {0} }, > { NULL, NULL, NULL, NULL, NULL, {0} }, > }; > Jan
On Thu, Aug 15, 2013 at 3:30 PM, Jan Kiszka <jan.kiszka@web.de> wrote: > On 2013-08-13 17:56, Arthur Chunqi Li wrote: >> Add testing for CR0/4 shadowing. > > A few sentences on the test strategy would be good. > >> >> Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com> >> --- >> lib/x86/vm.h | 4 + >> x86/vmx_tests.c | 218 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ >> 2 files changed, 222 insertions(+) >> >> diff --git a/lib/x86/vm.h b/lib/x86/vm.h >> index eff6f72..6e0ce2b 100644 >> --- a/lib/x86/vm.h >> +++ b/lib/x86/vm.h >> @@ -17,9 +17,13 @@ >> #define PTE_ADDR (0xffffffffff000ull) >> >> #define X86_CR0_PE 0x00000001 >> +#define X86_CR0_MP 0x00000002 >> +#define X86_CR0_TS 0x00000008 >> #define X86_CR0_WP 0x00010000 >> #define X86_CR0_PG 0x80000000 >> #define X86_CR4_VMXE 0x00000001 >> +#define X86_CR4_TSD 0x00000004 >> +#define X86_CR4_DE 0x00000008 >> #define X86_CR4_PSE 0x00000010 >> #define X86_CR4_PAE 0x00000020 >> #define X86_CR4_PCIDE 0x00020000 >> diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c >> index 61b0cef..44be3f4 100644 >> --- a/x86/vmx_tests.c >> +++ b/x86/vmx_tests.c >> @@ -5,12 +5,18 @@ >> >> u64 ia32_pat; >> u64 ia32_efer; >> +u32 stage; >> >> static inline void vmcall() >> { >> asm volatile("vmcall"); >> } >> >> +static inline void set_stage(u32 s) >> +{ >> + asm volatile("mov %0, stage\n\t"::"r"(s):"memory", "cc"); >> +} >> + > > Why do we need "state = s" as assembler instruction? This is due to assembler optimization. If we simply use "state = s", assembler will sometimes optimize it and state may not be set indeed. 
> >> void basic_init() >> { >> } >> @@ -257,6 +263,216 @@ static int test_ctrl_efer_exit_handler() >> return VMX_TEST_VMEXIT; >> } >> >> +u32 guest_cr0, guest_cr4; >> + >> +static void cr_shadowing_main() >> +{ >> + u32 cr0, cr4, tmp; >> + >> + // Test read through >> + set_stage(0); >> + guest_cr0 = read_cr0(); >> + if (stage == 1) >> + report("Read through CR0", 0); >> + else >> + vmcall(); >> + set_stage(1); >> + guest_cr4 = read_cr4(); >> + if (stage == 2) >> + report("Read through CR4", 0); >> + else >> + vmcall(); >> + // Test write through >> + guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP); >> + guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE); >> + set_stage(2); >> + write_cr0(guest_cr0); >> + if (stage == 3) >> + report("Write throuth CR0", 0); >> + else >> + vmcall(); >> + set_stage(3); >> + write_cr4(guest_cr4); >> + if (stage == 4) >> + report("Write through CR4", 0); >> + else >> + vmcall(); >> + // Test read shadow >> + set_stage(4); >> + vmcall(); >> + cr0 = read_cr0(); >> + if (stage != 5) { >> + if (cr0 == guest_cr0) >> + report("Read shadowing CR0", 1); >> + else >> + report("Read shadowing CR0", 0); >> + } >> + set_stage(5); >> + cr4 = read_cr4(); >> + if (stage != 6) { >> + if (cr4 == guest_cr4) >> + report("Read shadowing CR4", 1); >> + else >> + report("Read shadowing CR4", 0); >> + } >> + // Test write shadow (same value with shadow) >> + set_stage(6); >> + write_cr0(guest_cr0); >> + if (stage == 7) >> + report("Write shadowing CR0 (same value with shadow)", 0); >> + else >> + vmcall(); >> + set_stage(7); >> + write_cr4(guest_cr4); >> + if (stage == 8) >> + report("Write shadowing CR4 (same value with shadow)", 0); >> + else >> + vmcall(); >> + // Test write shadow (different value) >> + set_stage(8); >> + tmp = guest_cr0 ^ X86_CR0_TS; >> + asm volatile("mov %0, %%rsi\n\t" >> + "mov %%rsi, %%cr0\n\t" >> + ::"m"(tmp) >> + :"rsi", "memory", "cc"); >> + if (stage != 9) >> + report("Write shadowing different X86_CR0_TS", 0); >> + else >> + 
report("Write shadowing different X86_CR0_TS", 1); >> + set_stage(9); >> + tmp = guest_cr0 ^ X86_CR0_MP; >> + asm volatile("mov %0, %%rsi\n\t" >> + "mov %%rsi, %%cr0\n\t" >> + ::"m"(tmp) >> + :"rsi", "memory", "cc"); >> + if (stage != 10) >> + report("Write shadowing different X86_CR0_MP", 0); >> + else >> + report("Write shadowing different X86_CR0_MP", 1); >> + set_stage(10); >> + tmp = guest_cr4 ^ X86_CR4_TSD; >> + asm volatile("mov %0, %%rsi\n\t" >> + "mov %%rsi, %%cr4\n\t" >> + ::"m"(tmp) >> + :"rsi", "memory", "cc"); >> + if (stage != 11) >> + report("Write shadowing different X86_CR4_TSD", 0); >> + else >> + report("Write shadowing different X86_CR4_TSD", 1); >> + set_stage(11); >> + tmp = guest_cr4 ^ X86_CR4_DE; >> + asm volatile("mov %0, %%rsi\n\t" >> + "mov %%rsi, %%cr4\n\t" >> + ::"m"(tmp) >> + :"rsi", "memory", "cc"); >> + if (stage != 12) >> + report("Write shadowing different X86_CR4_DE", 0); >> + else >> + report("Write shadowing different X86_CR4_DE", 1); >> +} >> + >> +static int cr_shadowing_exit_handler() >> +{ >> + u64 guest_rip; >> + ulong reason; >> + u32 insn_len; >> + u32 exit_qual; >> + >> + guest_rip = vmcs_read(GUEST_RIP); >> + reason = vmcs_read(EXI_REASON) & 0xff; >> + insn_len = vmcs_read(EXI_INST_LEN); >> + exit_qual = vmcs_read(EXI_QUALIFICATION); >> + switch (reason) { >> + case VMX_VMCALL: >> + switch (stage) { >> + case 0: >> + if (guest_cr0 == vmcs_read(GUEST_CR0)) >> + report("Read through CR0", 1); >> + else >> + report("Read through CR0", 0); >> + break; >> + case 1: >> + if (guest_cr4 == vmcs_read(GUEST_CR4)) >> + report("Read through CR4", 1); >> + else >> + report("Read through CR4", 0); >> + break; >> + case 2: >> + if (guest_cr0 == vmcs_read(GUEST_CR0)) >> + report("Write through CR0", 1); >> + else >> + report("Write through CR0", 0); >> + break; >> + case 3: >> + if (guest_cr4 == vmcs_read(GUEST_CR4)) >> + report("Write through CR4", 1); >> + else >> + report("Write through CR4", 0); >> + break; >> + case 4: >> + 
guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP); >> + guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE); >> + vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP); >> + vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP)); >> + vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE); >> + vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE)); >> + break; >> + case 6: >> + if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP))) >> + report("Write shadowing CR0 (same value)", 1); >> + else >> + report("Write shadowing CR0 (same value)", 0); >> + break; >> + case 7: >> + if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE))) >> + report("Write shadowing CR4 (same value)", 1); >> + else >> + report("Write shadowing CR4 (same value)", 0); >> + break; >> + default: > > report("something went wrong", 0); ??? Actually, we cannot reach here for any reason. If necessary, we can put an error message here. > >> + break; >> + } >> + vmcs_write(GUEST_RIP, guest_rip + insn_len); >> + return VMX_TEST_RESUME; >> + case VMX_CR: >> + switch (stage) { >> + case 4: >> + report("Read shadowing CR0", 0); >> + set_stage(stage + 1); >> + break; >> + case 5: >> + report("Read shadowing CR4", 0); >> + set_stage(stage + 1); >> + break; >> + case 6: >> + report("Write shadowing CR0 (same value)", 0); >> + set_stage(stage + 1); >> + break; >> + case 7: >> + report("Write shadowing CR4 (same value)", 0); >> + set_stage(stage + 1); >> + break; >> + case 8: >> + case 9: >> + if (exit_qual == 0x600) >> + set_stage(stage + 1); > > What is this qualification about? Yes, ESI to CR0, but that takes a > manual to find out. And what if exit_qual is something else? Is that a > test error? For stage 9, it is a "MOV to CR0/4" vmexit, qualification indicates the detail of this exit. Take a look at the codes above of stage 9, I think it means a test error if exit_qual is something else. 
Arthur > >> + break; >> + case 10: >> + case 11: >> + if (exit_qual == 0x604) >> + set_stage(stage + 1); > > Same as above. > >> + default: > > ...? > >> + break; >> + } >> + vmcs_write(GUEST_RIP, guest_rip + insn_len); >> + return VMX_TEST_RESUME; >> + default: >> + printf("Unknown exit reason, %d\n", reason); >> + print_vmexit_info(); >> + } >> + return VMX_TEST_VMEXIT; >> +} >> + >> /* name/init/guest_main/exit_handler/syscall_handler/guest_regs >> basic_* just implement some basic functions */ >> struct vmx_test vmx_tests[] = { >> @@ -268,5 +484,7 @@ struct vmx_test vmx_tests[] = { >> test_ctrl_pat_exit_handler, basic_syscall_handler, {0} }, >> { "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main, >> test_ctrl_efer_exit_handler, basic_syscall_handler, {0} }, >> + { "CR shadowing", basic_init, cr_shadowing_main, >> + cr_shadowing_exit_handler, basic_syscall_handler, {0} }, >> { NULL, NULL, NULL, NULL, NULL, {0} }, >> }; >> > > Jan >
On 2013-08-15 09:40, Arthur Chunqi Li wrote: > On Thu, Aug 15, 2013 at 3:30 PM, Jan Kiszka <jan.kiszka@web.de> wrote: >> On 2013-08-13 17:56, Arthur Chunqi Li wrote: >>> Add testing for CR0/4 shadowing. >> >> A few sentences on the test strategy would be good. >> >>> >>> Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com> >>> --- >>> lib/x86/vm.h | 4 + >>> x86/vmx_tests.c | 218 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ >>> 2 files changed, 222 insertions(+) >>> >>> diff --git a/lib/x86/vm.h b/lib/x86/vm.h >>> index eff6f72..6e0ce2b 100644 >>> --- a/lib/x86/vm.h >>> +++ b/lib/x86/vm.h >>> @@ -17,9 +17,13 @@ >>> #define PTE_ADDR (0xffffffffff000ull) >>> >>> #define X86_CR0_PE 0x00000001 >>> +#define X86_CR0_MP 0x00000002 >>> +#define X86_CR0_TS 0x00000008 >>> #define X86_CR0_WP 0x00010000 >>> #define X86_CR0_PG 0x80000000 >>> #define X86_CR4_VMXE 0x00000001 >>> +#define X86_CR4_TSD 0x00000004 >>> +#define X86_CR4_DE 0x00000008 >>> #define X86_CR4_PSE 0x00000010 >>> #define X86_CR4_PAE 0x00000020 >>> #define X86_CR4_PCIDE 0x00020000 >>> diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c >>> index 61b0cef..44be3f4 100644 >>> --- a/x86/vmx_tests.c >>> +++ b/x86/vmx_tests.c >>> @@ -5,12 +5,18 @@ >>> >>> u64 ia32_pat; >>> u64 ia32_efer; >>> +u32 stage; >>> >>> static inline void vmcall() >>> { >>> asm volatile("vmcall"); >>> } >>> >>> +static inline void set_stage(u32 s) >>> +{ >>> + asm volatile("mov %0, stage\n\t"::"r"(s):"memory", "cc"); >>> +} >>> + >> >> Why do we need "state = s" as assembler instruction? > This is due to assembler optimization. If we simply use "state = s", > assembler will sometimes optimize it and state may not be set indeed. volatile u32 stage? And we have barrier() to avoid reordering. 
>> >>> void basic_init() >>> { >>> } >>> @@ -257,6 +263,216 @@ static int test_ctrl_efer_exit_handler() >>> return VMX_TEST_VMEXIT; >>> } >>> >>> +u32 guest_cr0, guest_cr4; >>> + >>> +static void cr_shadowing_main() >>> +{ >>> + u32 cr0, cr4, tmp; >>> + >>> + // Test read through >>> + set_stage(0); >>> + guest_cr0 = read_cr0(); >>> + if (stage == 1) >>> + report("Read through CR0", 0); >>> + else >>> + vmcall(); >>> + set_stage(1); >>> + guest_cr4 = read_cr4(); >>> + if (stage == 2) >>> + report("Read through CR4", 0); >>> + else >>> + vmcall(); >>> + // Test write through >>> + guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP); >>> + guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE); >>> + set_stage(2); >>> + write_cr0(guest_cr0); >>> + if (stage == 3) >>> + report("Write throuth CR0", 0); >>> + else >>> + vmcall(); >>> + set_stage(3); >>> + write_cr4(guest_cr4); >>> + if (stage == 4) >>> + report("Write through CR4", 0); >>> + else >>> + vmcall(); >>> + // Test read shadow >>> + set_stage(4); >>> + vmcall(); >>> + cr0 = read_cr0(); >>> + if (stage != 5) { >>> + if (cr0 == guest_cr0) >>> + report("Read shadowing CR0", 1); >>> + else >>> + report("Read shadowing CR0", 0); >>> + } >>> + set_stage(5); >>> + cr4 = read_cr4(); >>> + if (stage != 6) { >>> + if (cr4 == guest_cr4) >>> + report("Read shadowing CR4", 1); >>> + else >>> + report("Read shadowing CR4", 0); >>> + } >>> + // Test write shadow (same value with shadow) >>> + set_stage(6); >>> + write_cr0(guest_cr0); >>> + if (stage == 7) >>> + report("Write shadowing CR0 (same value with shadow)", 0); >>> + else >>> + vmcall(); >>> + set_stage(7); >>> + write_cr4(guest_cr4); >>> + if (stage == 8) >>> + report("Write shadowing CR4 (same value with shadow)", 0); >>> + else >>> + vmcall(); >>> + // Test write shadow (different value) >>> + set_stage(8); >>> + tmp = guest_cr0 ^ X86_CR0_TS; >>> + asm volatile("mov %0, %%rsi\n\t" >>> + "mov %%rsi, %%cr0\n\t" >>> + ::"m"(tmp) >>> + :"rsi", "memory", "cc"); >>> + if 
(stage != 9) >>> + report("Write shadowing different X86_CR0_TS", 0); >>> + else >>> + report("Write shadowing different X86_CR0_TS", 1); >>> + set_stage(9); >>> + tmp = guest_cr0 ^ X86_CR0_MP; >>> + asm volatile("mov %0, %%rsi\n\t" >>> + "mov %%rsi, %%cr0\n\t" >>> + ::"m"(tmp) >>> + :"rsi", "memory", "cc"); >>> + if (stage != 10) >>> + report("Write shadowing different X86_CR0_MP", 0); >>> + else >>> + report("Write shadowing different X86_CR0_MP", 1); >>> + set_stage(10); >>> + tmp = guest_cr4 ^ X86_CR4_TSD; >>> + asm volatile("mov %0, %%rsi\n\t" >>> + "mov %%rsi, %%cr4\n\t" >>> + ::"m"(tmp) >>> + :"rsi", "memory", "cc"); >>> + if (stage != 11) >>> + report("Write shadowing different X86_CR4_TSD", 0); >>> + else >>> + report("Write shadowing different X86_CR4_TSD", 1); >>> + set_stage(11); >>> + tmp = guest_cr4 ^ X86_CR4_DE; >>> + asm volatile("mov %0, %%rsi\n\t" >>> + "mov %%rsi, %%cr4\n\t" >>> + ::"m"(tmp) >>> + :"rsi", "memory", "cc"); >>> + if (stage != 12) >>> + report("Write shadowing different X86_CR4_DE", 0); >>> + else >>> + report("Write shadowing different X86_CR4_DE", 1); >>> +} >>> + >>> +static int cr_shadowing_exit_handler() >>> +{ >>> + u64 guest_rip; >>> + ulong reason; >>> + u32 insn_len; >>> + u32 exit_qual; >>> + >>> + guest_rip = vmcs_read(GUEST_RIP); >>> + reason = vmcs_read(EXI_REASON) & 0xff; >>> + insn_len = vmcs_read(EXI_INST_LEN); >>> + exit_qual = vmcs_read(EXI_QUALIFICATION); >>> + switch (reason) { >>> + case VMX_VMCALL: >>> + switch (stage) { >>> + case 0: >>> + if (guest_cr0 == vmcs_read(GUEST_CR0)) >>> + report("Read through CR0", 1); >>> + else >>> + report("Read through CR0", 0); >>> + break; >>> + case 1: >>> + if (guest_cr4 == vmcs_read(GUEST_CR4)) >>> + report("Read through CR4", 1); >>> + else >>> + report("Read through CR4", 0); >>> + break; >>> + case 2: >>> + if (guest_cr0 == vmcs_read(GUEST_CR0)) >>> + report("Write through CR0", 1); >>> + else >>> + report("Write through CR0", 0); >>> + break; >>> + case 3: >>> + if 
(guest_cr4 == vmcs_read(GUEST_CR4)) >>> + report("Write through CR4", 1); >>> + else >>> + report("Write through CR4", 0); >>> + break; >>> + case 4: >>> + guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP); >>> + guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE); >>> + vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP); >>> + vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP)); >>> + vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE); >>> + vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE)); >>> + break; >>> + case 6: >>> + if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP))) >>> + report("Write shadowing CR0 (same value)", 1); >>> + else >>> + report("Write shadowing CR0 (same value)", 0); >>> + break; >>> + case 7: >>> + if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE))) >>> + report("Write shadowing CR4 (same value)", 1); >>> + else >>> + report("Write shadowing CR4 (same value)", 0); >>> + break; >>> + default: >> >> report("something went wrong", 0); ??? > Actually, we cannot reach here for any reason. If necessary, we can > put an error message here. It's always better to document this, specifically via code, instead of letting the reader wonder. >> >>> + break; >>> + } >>> + vmcs_write(GUEST_RIP, guest_rip + insn_len); >>> + return VMX_TEST_RESUME; >>> + case VMX_CR: >>> + switch (stage) { >>> + case 4: >>> + report("Read shadowing CR0", 0); >>> + set_stage(stage + 1); >>> + break; >>> + case 5: >>> + report("Read shadowing CR4", 0); >>> + set_stage(stage + 1); >>> + break; >>> + case 6: >>> + report("Write shadowing CR0 (same value)", 0); >>> + set_stage(stage + 1); >>> + break; >>> + case 7: >>> + report("Write shadowing CR4 (same value)", 0); >>> + set_stage(stage + 1); >>> + break; >>> + case 8: >>> + case 9: >>> + if (exit_qual == 0x600) >>> + set_stage(stage + 1); >> >> What is this qualification about? Yes, ESI to CR0, but that takes a >> manual to find out. 
And what if exit_qual is something else? Is that a >> test error? > For stage 9, it is a "MOV to CR0/4" vmexit, qualification indicates > the detail of this exit. Take a look at the codes above of stage 9, I > think it means a test error if exit_qual is something else. Again, better make it clear in the code what the expected exit_qual encodes and that any other value is an error. Jan
On Thu, Aug 15, 2013 at 3:47 PM, Jan Kiszka <jan.kiszka@web.de> wrote: > On 2013-08-15 09:40, Arthur Chunqi Li wrote: >> On Thu, Aug 15, 2013 at 3:30 PM, Jan Kiszka <jan.kiszka@web.de> wrote: >>> On 2013-08-13 17:56, Arthur Chunqi Li wrote: >>>> Add testing for CR0/4 shadowing. >>> >>> A few sentences on the test strategy would be good. >>> >>>> >>>> Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com> >>>> --- >>>> lib/x86/vm.h | 4 + >>>> x86/vmx_tests.c | 218 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ >>>> 2 files changed, 222 insertions(+) >>>> >>>> diff --git a/lib/x86/vm.h b/lib/x86/vm.h >>>> index eff6f72..6e0ce2b 100644 >>>> --- a/lib/x86/vm.h >>>> +++ b/lib/x86/vm.h >>>> @@ -17,9 +17,13 @@ >>>> #define PTE_ADDR (0xffffffffff000ull) >>>> >>>> #define X86_CR0_PE 0x00000001 >>>> +#define X86_CR0_MP 0x00000002 >>>> +#define X86_CR0_TS 0x00000008 >>>> #define X86_CR0_WP 0x00010000 >>>> #define X86_CR0_PG 0x80000000 >>>> #define X86_CR4_VMXE 0x00000001 >>>> +#define X86_CR4_TSD 0x00000004 >>>> +#define X86_CR4_DE 0x00000008 >>>> #define X86_CR4_PSE 0x00000010 >>>> #define X86_CR4_PAE 0x00000020 >>>> #define X86_CR4_PCIDE 0x00020000 >>>> diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c >>>> index 61b0cef..44be3f4 100644 >>>> --- a/x86/vmx_tests.c >>>> +++ b/x86/vmx_tests.c >>>> @@ -5,12 +5,18 @@ >>>> >>>> u64 ia32_pat; >>>> u64 ia32_efer; >>>> +u32 stage; >>>> >>>> static inline void vmcall() >>>> { >>>> asm volatile("vmcall"); >>>> } >>>> >>>> +static inline void set_stage(u32 s) >>>> +{ >>>> + asm volatile("mov %0, stage\n\t"::"r"(s):"memory", "cc"); >>>> +} >>>> + >>> >>> Why do we need "state = s" as assembler instruction? >> This is due to assembler optimization. If we simply use "state = s", >> assembler will sometimes optimize it and state may not be set indeed. > > volatile u32 stage? And we have barrier() to avoid reordering. Reordering here is not a big deal here, though it is actually needed here. 
I occurred the following problem: stage = 1; do something that causes vmexit; stage = 2; Then the compiler will optimize "stage = 1" and "stage = 2" to one instruction "stage =2", since instructions between them don't use "stage". Can volatile solve this problem? Arthur > >>> >>>> void basic_init() >>>> { >>>> } >>>> @@ -257,6 +263,216 @@ static int test_ctrl_efer_exit_handler() >>>> return VMX_TEST_VMEXIT; >>>> } >>>> >>>> +u32 guest_cr0, guest_cr4; >>>> + >>>> +static void cr_shadowing_main() >>>> +{ >>>> + u32 cr0, cr4, tmp; >>>> + >>>> + // Test read through >>>> + set_stage(0); >>>> + guest_cr0 = read_cr0(); >>>> + if (stage == 1) >>>> + report("Read through CR0", 0); >>>> + else >>>> + vmcall(); >>>> + set_stage(1); >>>> + guest_cr4 = read_cr4(); >>>> + if (stage == 2) >>>> + report("Read through CR4", 0); >>>> + else >>>> + vmcall(); >>>> + // Test write through >>>> + guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP); >>>> + guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE); >>>> + set_stage(2); >>>> + write_cr0(guest_cr0); >>>> + if (stage == 3) >>>> + report("Write throuth CR0", 0); >>>> + else >>>> + vmcall(); >>>> + set_stage(3); >>>> + write_cr4(guest_cr4); >>>> + if (stage == 4) >>>> + report("Write through CR4", 0); >>>> + else >>>> + vmcall(); >>>> + // Test read shadow >>>> + set_stage(4); >>>> + vmcall(); >>>> + cr0 = read_cr0(); >>>> + if (stage != 5) { >>>> + if (cr0 == guest_cr0) >>>> + report("Read shadowing CR0", 1); >>>> + else >>>> + report("Read shadowing CR0", 0); >>>> + } >>>> + set_stage(5); >>>> + cr4 = read_cr4(); >>>> + if (stage != 6) { >>>> + if (cr4 == guest_cr4) >>>> + report("Read shadowing CR4", 1); >>>> + else >>>> + report("Read shadowing CR4", 0); >>>> + } >>>> + // Test write shadow (same value with shadow) >>>> + set_stage(6); >>>> + write_cr0(guest_cr0); >>>> + if (stage == 7) >>>> + report("Write shadowing CR0 (same value with shadow)", 0); >>>> + else >>>> + vmcall(); >>>> + set_stage(7); >>>> + write_cr4(guest_cr4); 
>>>> + if (stage == 8) >>>> + report("Write shadowing CR4 (same value with shadow)", 0); >>>> + else >>>> + vmcall(); >>>> + // Test write shadow (different value) >>>> + set_stage(8); >>>> + tmp = guest_cr0 ^ X86_CR0_TS; >>>> + asm volatile("mov %0, %%rsi\n\t" >>>> + "mov %%rsi, %%cr0\n\t" >>>> + ::"m"(tmp) >>>> + :"rsi", "memory", "cc"); >>>> + if (stage != 9) >>>> + report("Write shadowing different X86_CR0_TS", 0); >>>> + else >>>> + report("Write shadowing different X86_CR0_TS", 1); >>>> + set_stage(9); >>>> + tmp = guest_cr0 ^ X86_CR0_MP; >>>> + asm volatile("mov %0, %%rsi\n\t" >>>> + "mov %%rsi, %%cr0\n\t" >>>> + ::"m"(tmp) >>>> + :"rsi", "memory", "cc"); >>>> + if (stage != 10) >>>> + report("Write shadowing different X86_CR0_MP", 0); >>>> + else >>>> + report("Write shadowing different X86_CR0_MP", 1); >>>> + set_stage(10); >>>> + tmp = guest_cr4 ^ X86_CR4_TSD; >>>> + asm volatile("mov %0, %%rsi\n\t" >>>> + "mov %%rsi, %%cr4\n\t" >>>> + ::"m"(tmp) >>>> + :"rsi", "memory", "cc"); >>>> + if (stage != 11) >>>> + report("Write shadowing different X86_CR4_TSD", 0); >>>> + else >>>> + report("Write shadowing different X86_CR4_TSD", 1); >>>> + set_stage(11); >>>> + tmp = guest_cr4 ^ X86_CR4_DE; >>>> + asm volatile("mov %0, %%rsi\n\t" >>>> + "mov %%rsi, %%cr4\n\t" >>>> + ::"m"(tmp) >>>> + :"rsi", "memory", "cc"); >>>> + if (stage != 12) >>>> + report("Write shadowing different X86_CR4_DE", 0); >>>> + else >>>> + report("Write shadowing different X86_CR4_DE", 1); >>>> +} >>>> + >>>> +static int cr_shadowing_exit_handler() >>>> +{ >>>> + u64 guest_rip; >>>> + ulong reason; >>>> + u32 insn_len; >>>> + u32 exit_qual; >>>> + >>>> + guest_rip = vmcs_read(GUEST_RIP); >>>> + reason = vmcs_read(EXI_REASON) & 0xff; >>>> + insn_len = vmcs_read(EXI_INST_LEN); >>>> + exit_qual = vmcs_read(EXI_QUALIFICATION); >>>> + switch (reason) { >>>> + case VMX_VMCALL: >>>> + switch (stage) { >>>> + case 0: >>>> + if (guest_cr0 == vmcs_read(GUEST_CR0)) >>>> + report("Read through CR0", 1); 
>>>> + else >>>> + report("Read through CR0", 0); >>>> + break; >>>> + case 1: >>>> + if (guest_cr4 == vmcs_read(GUEST_CR4)) >>>> + report("Read through CR4", 1); >>>> + else >>>> + report("Read through CR4", 0); >>>> + break; >>>> + case 2: >>>> + if (guest_cr0 == vmcs_read(GUEST_CR0)) >>>> + report("Write through CR0", 1); >>>> + else >>>> + report("Write through CR0", 0); >>>> + break; >>>> + case 3: >>>> + if (guest_cr4 == vmcs_read(GUEST_CR4)) >>>> + report("Write through CR4", 1); >>>> + else >>>> + report("Write through CR4", 0); >>>> + break; >>>> + case 4: >>>> + guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP); >>>> + guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE); >>>> + vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP); >>>> + vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP)); >>>> + vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE); >>>> + vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE)); >>>> + break; >>>> + case 6: >>>> + if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP))) >>>> + report("Write shadowing CR0 (same value)", 1); >>>> + else >>>> + report("Write shadowing CR0 (same value)", 0); >>>> + break; >>>> + case 7: >>>> + if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE))) >>>> + report("Write shadowing CR4 (same value)", 1); >>>> + else >>>> + report("Write shadowing CR4 (same value)", 0); >>>> + break; >>>> + default: >>> >>> report("something went wrong", 0); ??? >> Actually, we cannot reach here for any reason. If necessary, we can >> put an error message here. > > It's always better to document this, specifically via code, instead of > letting the reader wonder. 
> >>> >>>> + break; >>>> + } >>>> + vmcs_write(GUEST_RIP, guest_rip + insn_len); >>>> + return VMX_TEST_RESUME; >>>> + case VMX_CR: >>>> + switch (stage) { >>>> + case 4: >>>> + report("Read shadowing CR0", 0); >>>> + set_stage(stage + 1); >>>> + break; >>>> + case 5: >>>> + report("Read shadowing CR4", 0); >>>> + set_stage(stage + 1); >>>> + break; >>>> + case 6: >>>> + report("Write shadowing CR0 (same value)", 0); >>>> + set_stage(stage + 1); >>>> + break; >>>> + case 7: >>>> + report("Write shadowing CR4 (same value)", 0); >>>> + set_stage(stage + 1); >>>> + break; >>>> + case 8: >>>> + case 9: >>>> + if (exit_qual == 0x600) >>>> + set_stage(stage + 1); >>> >>> What is this qualification about? Yes, ESI to CR0, but that takes a >>> manual to find out. And what if exit_qual is something else? Is that a >>> test error? >> For stage 9, it is a "MOV to CR0/4" vmexit, qualification indicates >> the detail of this exit. Take a look at the codes above of stage 9, I >> think it means a test error if exit_qual is something else. > > Again, better make it clear in the code what the expected exit_qual > encodes and that any other value is an error. > > Jan > >
On 2013-08-15 09:59, Arthur Chunqi Li wrote: > On Thu, Aug 15, 2013 at 3:47 PM, Jan Kiszka <jan.kiszka@web.de> wrote: >> On 2013-08-15 09:40, Arthur Chunqi Li wrote: >>> On Thu, Aug 15, 2013 at 3:30 PM, Jan Kiszka <jan.kiszka@web.de> wrote: >>>> On 2013-08-13 17:56, Arthur Chunqi Li wrote: >>>>> Add testing for CR0/4 shadowing. >>>> >>>> A few sentences on the test strategy would be good. >>>> >>>>> >>>>> Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com> >>>>> --- >>>>> lib/x86/vm.h | 4 + >>>>> x86/vmx_tests.c | 218 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ >>>>> 2 files changed, 222 insertions(+) >>>>> >>>>> diff --git a/lib/x86/vm.h b/lib/x86/vm.h >>>>> index eff6f72..6e0ce2b 100644 >>>>> --- a/lib/x86/vm.h >>>>> +++ b/lib/x86/vm.h >>>>> @@ -17,9 +17,13 @@ >>>>> #define PTE_ADDR (0xffffffffff000ull) >>>>> >>>>> #define X86_CR0_PE 0x00000001 >>>>> +#define X86_CR0_MP 0x00000002 >>>>> +#define X86_CR0_TS 0x00000008 >>>>> #define X86_CR0_WP 0x00010000 >>>>> #define X86_CR0_PG 0x80000000 >>>>> #define X86_CR4_VMXE 0x00000001 >>>>> +#define X86_CR4_TSD 0x00000004 >>>>> +#define X86_CR4_DE 0x00000008 >>>>> #define X86_CR4_PSE 0x00000010 >>>>> #define X86_CR4_PAE 0x00000020 >>>>> #define X86_CR4_PCIDE 0x00020000 >>>>> diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c >>>>> index 61b0cef..44be3f4 100644 >>>>> --- a/x86/vmx_tests.c >>>>> +++ b/x86/vmx_tests.c >>>>> @@ -5,12 +5,18 @@ >>>>> >>>>> u64 ia32_pat; >>>>> u64 ia32_efer; >>>>> +u32 stage; >>>>> >>>>> static inline void vmcall() >>>>> { >>>>> asm volatile("vmcall"); >>>>> } >>>>> >>>>> +static inline void set_stage(u32 s) >>>>> +{ >>>>> + asm volatile("mov %0, stage\n\t"::"r"(s):"memory", "cc"); >>>>> +} >>>>> + >>>> >>>> Why do we need "state = s" as assembler instruction? >>> This is due to assembler optimization. If we simply use "state = s", >>> assembler will sometimes optimize it and state may not be set indeed. >> >> volatile u32 stage? And we have barrier() to avoid reordering. 
> Reordering here is not a big deal here, though it is actually needed > here. I encountered the following problem: > > stage = 1; > do something that causes vmexit; > stage = 2; > > Then the compiler will optimize "stage = 1" and "stage = 2" to one > instruction "stage =2", since instructions between them don't use > "stage". Can volatile solve this problem? Yep. Jan
Il 15/08/2013 09:59, Arthur Chunqi Li ha scritto: >> > volatile u32 stage? And we have barrier() to avoid reordering. > Reordering here is not a big deal here, though it is actually needed > here. I occurred the following problem: > > stage = 1; > do something that causes vmexit; > stage = 2; > > Then the compiler will optimize "stage = 1" and "stage = 2" to one > instruction "stage =2", since instructions between them don't use > "stage". Can volatile solve this problem? I'm not sure if volatile stores are reordered against non-volatile stores. Better keep set_stage() but write it as barrier(); stage = s; barrier(); Paolo -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On 2013-8-18, at 22:07, Paolo Bonzini <pbonzini@redhat.com> wrote: > Il 15/08/2013 09:59, Arthur Chunqi Li ha scritto: >>>> volatile u32 stage? And we have barrier() to avoid reordering. >> Reordering here is not a big deal here, though it is actually needed >> here. I occurred the following problem: >> >> stage = 1; >> do something that causes vmexit; >> stage = 2; >> >> Then the compiler will optimize "stage = 1" and "stage = 2" to one >> instruction "stage =2", since instructions between them don't use >> "stage". Can volatile solve this problem? > > I'm not sure if volatile stores are reordered against non-volatile stores. > > Better keep set_stage() but write it as > > barrier(); > stage = s; > barrier(); Yep. I have done it like this in the 2nd version. Arthur > > Paolo -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/lib/x86/vm.h b/lib/x86/vm.h index eff6f72..6e0ce2b 100644 --- a/lib/x86/vm.h +++ b/lib/x86/vm.h @@ -17,9 +17,13 @@ #define PTE_ADDR (0xffffffffff000ull) #define X86_CR0_PE 0x00000001 +#define X86_CR0_MP 0x00000002 +#define X86_CR0_TS 0x00000008 #define X86_CR0_WP 0x00010000 #define X86_CR0_PG 0x80000000 #define X86_CR4_VMXE 0x00000001 +#define X86_CR4_TSD 0x00000004 +#define X86_CR4_DE 0x00000008 #define X86_CR4_PSE 0x00000010 #define X86_CR4_PAE 0x00000020 #define X86_CR4_PCIDE 0x00020000 diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c index 61b0cef..44be3f4 100644 --- a/x86/vmx_tests.c +++ b/x86/vmx_tests.c @@ -5,12 +5,18 @@ u64 ia32_pat; u64 ia32_efer; +u32 stage; static inline void vmcall() { asm volatile("vmcall"); } +static inline void set_stage(u32 s) +{ + asm volatile("mov %0, stage\n\t"::"r"(s):"memory", "cc"); +} + void basic_init() { } @@ -257,6 +263,216 @@ static int test_ctrl_efer_exit_handler() return VMX_TEST_VMEXIT; } +u32 guest_cr0, guest_cr4; + +static void cr_shadowing_main() +{ + u32 cr0, cr4, tmp; + + // Test read through + set_stage(0); + guest_cr0 = read_cr0(); + if (stage == 1) + report("Read through CR0", 0); + else + vmcall(); + set_stage(1); + guest_cr4 = read_cr4(); + if (stage == 2) + report("Read through CR4", 0); + else + vmcall(); + // Test write through + guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP); + guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE); + set_stage(2); + write_cr0(guest_cr0); + if (stage == 3) + report("Write throuth CR0", 0); + else + vmcall(); + set_stage(3); + write_cr4(guest_cr4); + if (stage == 4) + report("Write through CR4", 0); + else + vmcall(); + // Test read shadow + set_stage(4); + vmcall(); + cr0 = read_cr0(); + if (stage != 5) { + if (cr0 == guest_cr0) + report("Read shadowing CR0", 1); + else + report("Read shadowing CR0", 0); + } + set_stage(5); + cr4 = read_cr4(); + if (stage != 6) { + if (cr4 == guest_cr4) + report("Read shadowing CR4", 1); + else + report("Read shadowing CR4", 0); 
+ } + // Test write shadow (same value with shadow) + set_stage(6); + write_cr0(guest_cr0); + if (stage == 7) + report("Write shadowing CR0 (same value with shadow)", 0); + else + vmcall(); + set_stage(7); + write_cr4(guest_cr4); + if (stage == 8) + report("Write shadowing CR4 (same value with shadow)", 0); + else + vmcall(); + // Test write shadow (different value) + set_stage(8); + tmp = guest_cr0 ^ X86_CR0_TS; + asm volatile("mov %0, %%rsi\n\t" + "mov %%rsi, %%cr0\n\t" + ::"m"(tmp) + :"rsi", "memory", "cc"); + if (stage != 9) + report("Write shadowing different X86_CR0_TS", 0); + else + report("Write shadowing different X86_CR0_TS", 1); + set_stage(9); + tmp = guest_cr0 ^ X86_CR0_MP; + asm volatile("mov %0, %%rsi\n\t" + "mov %%rsi, %%cr0\n\t" + ::"m"(tmp) + :"rsi", "memory", "cc"); + if (stage != 10) + report("Write shadowing different X86_CR0_MP", 0); + else + report("Write shadowing different X86_CR0_MP", 1); + set_stage(10); + tmp = guest_cr4 ^ X86_CR4_TSD; + asm volatile("mov %0, %%rsi\n\t" + "mov %%rsi, %%cr4\n\t" + ::"m"(tmp) + :"rsi", "memory", "cc"); + if (stage != 11) + report("Write shadowing different X86_CR4_TSD", 0); + else + report("Write shadowing different X86_CR4_TSD", 1); + set_stage(11); + tmp = guest_cr4 ^ X86_CR4_DE; + asm volatile("mov %0, %%rsi\n\t" + "mov %%rsi, %%cr4\n\t" + ::"m"(tmp) + :"rsi", "memory", "cc"); + if (stage != 12) + report("Write shadowing different X86_CR4_DE", 0); + else + report("Write shadowing different X86_CR4_DE", 1); +} + +static int cr_shadowing_exit_handler() +{ + u64 guest_rip; + ulong reason; + u32 insn_len; + u32 exit_qual; + + guest_rip = vmcs_read(GUEST_RIP); + reason = vmcs_read(EXI_REASON) & 0xff; + insn_len = vmcs_read(EXI_INST_LEN); + exit_qual = vmcs_read(EXI_QUALIFICATION); + switch (reason) { + case VMX_VMCALL: + switch (stage) { + case 0: + if (guest_cr0 == vmcs_read(GUEST_CR0)) + report("Read through CR0", 1); + else + report("Read through CR0", 0); + break; + case 1: + if (guest_cr4 == 
vmcs_read(GUEST_CR4)) + report("Read through CR4", 1); + else + report("Read through CR4", 0); + break; + case 2: + if (guest_cr0 == vmcs_read(GUEST_CR0)) + report("Write through CR0", 1); + else + report("Write through CR0", 0); + break; + case 3: + if (guest_cr4 == vmcs_read(GUEST_CR4)) + report("Write through CR4", 1); + else + report("Write through CR4", 0); + break; + case 4: + guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP); + guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE); + vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP); + vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP)); + vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE); + vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE)); + break; + case 6: + if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP))) + report("Write shadowing CR0 (same value)", 1); + else + report("Write shadowing CR0 (same value)", 0); + break; + case 7: + if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE))) + report("Write shadowing CR4 (same value)", 1); + else + report("Write shadowing CR4 (same value)", 0); + break; + default: + break; + } + vmcs_write(GUEST_RIP, guest_rip + insn_len); + return VMX_TEST_RESUME; + case VMX_CR: + switch (stage) { + case 4: + report("Read shadowing CR0", 0); + set_stage(stage + 1); + break; + case 5: + report("Read shadowing CR4", 0); + set_stage(stage + 1); + break; + case 6: + report("Write shadowing CR0 (same value)", 0); + set_stage(stage + 1); + break; + case 7: + report("Write shadowing CR4 (same value)", 0); + set_stage(stage + 1); + break; + case 8: + case 9: + if (exit_qual == 0x600) + set_stage(stage + 1); + break; + case 10: + case 11: + if (exit_qual == 0x604) + set_stage(stage + 1); + default: + break; + } + vmcs_write(GUEST_RIP, guest_rip + insn_len); + return VMX_TEST_RESUME; + default: + printf("Unknown exit reason, %d\n", reason); + print_vmexit_info(); + } + return VMX_TEST_VMEXIT; +} + /* 
name/init/guest_main/exit_handler/syscall_handler/guest_regs basic_* just implement some basic functions */ struct vmx_test vmx_tests[] = { @@ -268,5 +484,7 @@ struct vmx_test vmx_tests[] = { test_ctrl_pat_exit_handler, basic_syscall_handler, {0} }, { "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main, test_ctrl_efer_exit_handler, basic_syscall_handler, {0} }, + { "CR shadowing", basic_init, cr_shadowing_main, + cr_shadowing_exit_handler, basic_syscall_handler, {0} }, { NULL, NULL, NULL, NULL, NULL, {0} }, };
Add testing for CR0/4 shadowing. Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com> --- lib/x86/vm.h | 4 + x86/vmx_tests.c | 218 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 222 insertions(+)