diff mbox series

[kvm-unit-tests,3/3] x86: nSVM: Add an exception test framework and tests

Message ID 20220207051202.577951-4-manali.shukla@amd.com (mailing list archive)
State New, archived
Headers show
Series nSVM: Add testing for routing L2 exceptions | expand

Commit Message

Manali Shukla Feb. 7, 2022, 5:12 a.m. UTC
Set up a test framework that verifies that an exception occurring
in L2 is forwarded to the right place (L1 or L2).
It adds an exception test array and exception callbacks to that array.

Tests two conditions for each exception:
1) Exception generated in L2, is handled by L2 when L2 exception handler
   is registered.
2) Exception generated in L2, is handled by L1 when intercept exception
   bit map is set in L1.

Add testing for below exceptions:
(#GP, #UD, #DE, #BP, #NM, #OF, #DB, #AC)
1. #GP is generated by a non-canonical access in L2.
2. #UD is generated by calling "ud2" instruction in L2.
3. #DE is generated using instrumented code which generates a
   divide-by-zero condition.
4. #BP is generated by calling "int3" instruction in L2.
5. #NM is generated by calling floating point instruction "fnop"
   in L2 when TS bit is set.
6. #OF is generated using instrumented code and "into" instruction
   is called in that code in L2.
7. #DB is generated by setting the TF bit before entering L2.
8. #AC is generated by writing 8 bytes to a 4-byte aligned address in
   L2 user mode when the AM bit is set in the CR0 register and the AC
   bit is set in RFLAGS.

Signed-off-by: Manali Shukla <manali.shukla@amd.com>
---
 x86/svm_tests.c | 185 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 185 insertions(+)

Comments

Aaron Lewis Feb. 14, 2022, 8:20 p.m. UTC | #1
> +static void svm_l2_nm_test(struct svm_test *svm)
> +{
> +    write_cr0(read_cr0() | X86_CR0_TS);
> +    asm volatile("fnop");
> +}
> +
> +static void svm_l2_of_test(struct svm_test *svm)
> +{
> +    struct far_pointer32 fp = {
> +        .offset = (uintptr_t)&&into,
> +        .selector = KERNEL_CS32,
> +    };
> +    uintptr_t rsp;
> +
> +    asm volatile ("mov %%rsp, %0" : "=r"(rsp));
> +
> +    if (fp.offset != (uintptr_t)&&into) {
> +        printf("Codee address too high.\n");

Nit: Code

> +        return;
> +    }
> +
> +    if ((u32)rsp != rsp) {
> +        printf("Stack address too high.\n");
> +    }
> +
> +    asm goto("lcall *%0" : : "m" (fp) : "rax" : into);
> +    return;
> +into:
> +    asm volatile (".code32;"
> +            "movl $0x7fffffff, %eax;"
> +            "addl %eax, %eax;"
> +            "into;"
> +            "lret;"
> +            ".code64");
> +    __builtin_unreachable();
> +}
> +

> +static void svm_l2_ac_test(struct svm_test *test)
> +{
> +    bool hit_ac = false;
> +
> +    write_cr0(read_cr0() | X86_CR0_AM);
> +    write_rflags(read_rflags() | X86_EFLAGS_AC);
> +
> +    run_in_user(usermode_callback, AC_VECTOR, 0, 0, 0, 0, &hit_ac);
> +
> +    report(hit_ac, "Usermode #AC handled in L2");
> +    vmmcall();
> +}
> +
> +static void svm_ac_init(void)
> +{
> +    set_user_mask_all(phys_to_virt(read_cr3()), PAGE_LEVEL);
> +}
> +
> +static void svm_ac_uninit(void)
> +{
> +    clear_user_mask_all(phys_to_virt(read_cr3()), PAGE_LEVEL);
> +}
> +
> +struct svm_exception_test {
> +    u8 vector;
> +    void (*guest_code)(struct svm_test*);
> +    void (*init_test)(void);
> +    void (*uninit_test)(void);
> +};
> +
> +struct svm_exception_test svm_exception_tests[] = {
> +    { GP_VECTOR, svm_l2_gp_test },
> +    { UD_VECTOR, svm_l2_ud_test },
> +    { DE_VECTOR, svm_l2_de_test },
> +    { BP_VECTOR, svm_l2_bp_test },
> +    { NM_VECTOR, svm_l2_nm_test },
> +    { OF_VECTOR, svm_l2_of_test },
> +    { DB_VECTOR, svm_l2_db_test },
> +    { AC_VECTOR, svm_l2_ac_test, svm_ac_init, svm_ac_uninit },
> +};

If you set and clear PT_USER_MASK in svm_l2_ac_test() before calling
into userspace you can remove init_test and uninit_test from the
framework all together.  That will simplify the code.

Further, it would be nice to then hoist this framework and the one in
vmx into a common x86 file, but looking at this that may be something
to think about in the future.  There would have to be wrappers when
interacting with the vmc{s,b} and macros at the very least.

> +
> +static u8 svm_exception_test_vector;
> +
> +static void svm_exception_handler(struct ex_regs *regs)
> +{
> +    report(regs->vector == svm_exception_test_vector,
> +            "Handling %s in L2's exception handler",
> +            exception_mnemonic(svm_exception_test_vector));
> +    vmmcall();
> +}
> +
Shukla, Manali Feb. 17, 2022, 3:26 a.m. UTC | #2
On 2/15/2022 1:50 AM, Aaron Lewis wrote:
>> +static void svm_l2_nm_test(struct svm_test *svm)
>> +{
>> +    write_cr0(read_cr0() | X86_CR0_TS);
>> +    asm volatile("fnop");
>> +}
>> +
>> +static void svm_l2_of_test(struct svm_test *svm)
>> +{
>> +    struct far_pointer32 fp = {
>> +        .offset = (uintptr_t)&&into,
>> +        .selector = KERNEL_CS32,
>> +    };
>> +    uintptr_t rsp;
>> +
>> +    asm volatile ("mov %%rsp, %0" : "=r"(rsp));
>> +
>> +    if (fp.offset != (uintptr_t)&&into) {
>> +        printf("Codee address too high.\n");
> 
> Nit: Code
> 
>> +        return;
>> +    }
>> +
>> +    if ((u32)rsp != rsp) {
>> +        printf("Stack address too high.\n");
>> +    }
>> +
>> +    asm goto("lcall *%0" : : "m" (fp) : "rax" : into);
>> +    return;
>> +into:
>> +    asm volatile (".code32;"
>> +            "movl $0x7fffffff, %eax;"
>> +            "addl %eax, %eax;"
>> +            "into;"
>> +            "lret;"
>> +            ".code64");
>> +    __builtin_unreachable();
>> +}
>> +
> 
>> +static void svm_l2_ac_test(struct svm_test *test)
>> +{
>> +    bool hit_ac = false;
>> +
>> +    write_cr0(read_cr0() | X86_CR0_AM);
>> +    write_rflags(read_rflags() | X86_EFLAGS_AC);
>> +
>> +    run_in_user(usermode_callback, AC_VECTOR, 0, 0, 0, 0, &hit_ac);
>> +
>> +    report(hit_ac, "Usermode #AC handled in L2");
>> +    vmmcall();
>> +}
>> +
>> +static void svm_ac_init(void)
>> +{
>> +    set_user_mask_all(phys_to_virt(read_cr3()), PAGE_LEVEL);
>> +}
>> +
>> +static void svm_ac_uninit(void)
>> +{
>> +    clear_user_mask_all(phys_to_virt(read_cr3()), PAGE_LEVEL);
>> +}
>> +
>> +struct svm_exception_test {
>> +    u8 vector;
>> +    void (*guest_code)(struct svm_test*);
>> +    void (*init_test)(void);
>> +    void (*uninit_test)(void);
>> +};
>> +
>> +struct svm_exception_test svm_exception_tests[] = {
>> +    { GP_VECTOR, svm_l2_gp_test },
>> +    { UD_VECTOR, svm_l2_ud_test },
>> +    { DE_VECTOR, svm_l2_de_test },
>> +    { BP_VECTOR, svm_l2_bp_test },
>> +    { NM_VECTOR, svm_l2_nm_test },
>> +    { OF_VECTOR, svm_l2_of_test },
>> +    { DB_VECTOR, svm_l2_db_test },
>> +    { AC_VECTOR, svm_l2_ac_test, svm_ac_init, svm_ac_uninit },
>> +};
> 
> If you set and clear PT_USER_MASK in svm_l2_ac_test() before calling
> into userspace you can remove init_test and uninit_test from the
> framework all together.  That will simplify the code.
> 
If clear_user_mask_all() is called after the userspace code, then when the #AC
exception is intercepted by L1, control goes directly to L1 and never reaches
clear_user_mask_all() (which runs after the userspace call run_in_user()).

That is why I have added init_test and uninit_test function

> Further, it would be nice to then hoist this framework and the one in
> vmx into a common x86 file, but looking at this that may be something
> to think about in the future.  There would have to be wrappers when
> interacting with the vmc{s,b} and macros at the very least.
> 

Yeah we can think of this in future.

>> +
>> +static u8 svm_exception_test_vector;
>> +
>> +static void svm_exception_handler(struct ex_regs *regs)
>> +{
>> +    report(regs->vector == svm_exception_test_vector,
>> +            "Handling %s in L2's exception handler",
>> +            exception_mnemonic(svm_exception_test_vector));
>> +    vmmcall();
>> +}
>> +

-Manali
Aaron Lewis Feb. 17, 2022, 2:46 p.m. UTC | #3
> >> +};
> >
> > If you set and clear PT_USER_MASK in svm_l2_ac_test() before calling
> > into userspace you can remove init_test and uninit_test from the
> > framework all together.  That will simplify the code.
> >
> If clear user mask is called after userspace code, when #AC exception is
> intercepted by L1, the control directly goes to L1 and it does not reach
> clear_user_mask_all() function (called after user space code function run_in_user()).
>
> That is why I have added init_test and uninit_test function
>

Ah, that makes sense.  Though IIUC, you are now moving the set/clear
elsewhere, right?  If so, it seems like init_test() and uninit_test()
are no longer needed.

> > Further, it would be nice to then hoist this framework and the one in
> > vmx into a common x86 file, but looking at this that may be something
> > to think about in the future.  There would have to be wrappers when
> > interacting with the vmc{s,b} and macros at the very least.
> >
diff mbox series

Patch

diff --git a/x86/svm_tests.c b/x86/svm_tests.c
index 0707786..66bfb51 100644
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -10,6 +10,7 @@ 
 #include "isr.h"
 #include "apic.h"
 #include "delay.h"
+#include "x86/usermode.h"
 
 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
 
@@ -3074,6 +3075,189 @@  static void svm_nm_test(void)
         "fnop with CR0.TS and CR0.EM unset no #NM excpetion");
 }
 
+static void svm_l2_gp_test(struct svm_test *test)
+{
+    *(volatile u64 *)NONCANONICAL = 0;
+}
+
+static void svm_l2_ud_test(struct svm_test *test)
+{
+    asm volatile ("ud2");
+}
+
+static void svm_l2_de_test(struct svm_test *test)
+{
+        asm volatile (
+                "xor %%eax, %%eax\n\t"
+                "xor %%ebx, %%ebx\n\t"
+                "xor %%edx, %%edx\n\t"
+                "idiv %%ebx\n\t"
+                ::: "eax", "ebx", "edx");
+}
+
+static void svm_l2_bp_test(struct svm_test *svm)
+{
+    asm volatile ("int3");
+}
+
+static void svm_l2_nm_test(struct svm_test *svm)
+{
+    write_cr0(read_cr0() | X86_CR0_TS);
+    asm volatile("fnop");
+}
+
+static void svm_l2_of_test(struct svm_test *svm)
+{
+    struct far_pointer32 fp = {
+        .offset = (uintptr_t)&&into,
+        .selector = KERNEL_CS32,
+    };
+    uintptr_t rsp;
+
+    asm volatile ("mov %%rsp, %0" : "=r"(rsp));
+
+    if (fp.offset != (uintptr_t)&&into) {
+        printf("Code address too high.\n");
+        return;
+    }
+
+    if ((u32)rsp != rsp) {
+        printf("Stack address too high.\n");
+    }
+
+    asm goto("lcall *%0" : : "m" (fp) : "rax" : into);
+    return;
+into:
+    asm volatile (".code32;"
+            "movl $0x7fffffff, %eax;"
+            "addl %eax, %eax;"
+            "into;"
+            "lret;"
+            ".code64");
+    __builtin_unreachable();
+}
+
+static void svm_l2_db_test(struct svm_test *test)
+{
+    write_rflags(read_rflags() | X86_EFLAGS_TF);
+}
+
+static uint64_t usermode_callback(void)
+{
+   /*
+    * Trigger an #AC by writing 8 bytes to a 4-byte aligned address.
+    * Disclaimer: It is assumed that the stack pointer is aligned
+    * on a 16-byte boundary as x86_64 stacks should be.
+    */
+    asm volatile("movq $0, -0x4(%rsp)");
+
+    return 0;
+}
+
+static void svm_l2_ac_test(struct svm_test *test)
+{
+    bool hit_ac = false;
+
+    write_cr0(read_cr0() | X86_CR0_AM);
+    write_rflags(read_rflags() | X86_EFLAGS_AC);
+
+    run_in_user(usermode_callback, AC_VECTOR, 0, 0, 0, 0, &hit_ac);
+
+    report(hit_ac, "Usermode #AC handled in L2");
+    vmmcall();
+}
+
+static void svm_ac_init(void)
+{
+    set_user_mask_all(phys_to_virt(read_cr3()), PAGE_LEVEL);
+}
+
+static void svm_ac_uninit(void)
+{
+    clear_user_mask_all(phys_to_virt(read_cr3()), PAGE_LEVEL);
+}
+
+struct svm_exception_test {
+    u8 vector;
+    void (*guest_code)(struct svm_test*);
+    void (*init_test)(void);
+    void (*uninit_test)(void);
+};
+
+struct svm_exception_test svm_exception_tests[] = {
+    { GP_VECTOR, svm_l2_gp_test },
+    { UD_VECTOR, svm_l2_ud_test },
+    { DE_VECTOR, svm_l2_de_test },
+    { BP_VECTOR, svm_l2_bp_test },
+    { NM_VECTOR, svm_l2_nm_test },
+    { OF_VECTOR, svm_l2_of_test },
+    { DB_VECTOR, svm_l2_db_test },
+    { AC_VECTOR, svm_l2_ac_test, svm_ac_init, svm_ac_uninit },
+};
+
+static u8 svm_exception_test_vector;
+
+static void svm_exception_handler(struct ex_regs *regs)
+{
+    report(regs->vector == svm_exception_test_vector,
+            "Handling %s in L2's exception handler",
+            exception_mnemonic(svm_exception_test_vector));
+    vmmcall();
+}
+
+static void handle_exception_in_l2(u8 vector)
+{
+    handler old_handler = handle_exception(vector, svm_exception_handler);
+    svm_exception_test_vector = vector;
+
+    report(svm_vmrun() == SVM_EXIT_VMMCALL,
+           "%s handled by L2", exception_mnemonic(vector));
+
+    handle_exception(vector, old_handler);
+}
+
+static void handle_exception_in_l1(u32 vector)
+{
+    u32 old_ie = vmcb->control.intercept_exceptions;
+
+    vmcb->control.intercept_exceptions |= (1ULL << vector);
+
+    report(svm_vmrun() == (SVM_EXIT_EXCP_BASE + vector),
+           "%s handled by L1",  exception_mnemonic(vector));
+
+    vmcb->control.intercept_exceptions = old_ie;
+}
+
+static void svm_exception_test(void)
+{
+    struct svm_exception_test *t;
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(svm_exception_tests); i++) {
+        t = &svm_exception_tests[i];
+        test_set_guest(t->guest_code);
+        if (t->init_test)
+            t->init_test();
+
+        handle_exception_in_l2(t->vector);
+
+        if (t->uninit_test)
+            t->uninit_test();
+
+        vmcb_ident(vmcb);
+
+        if (t->init_test)
+            t->init_test();
+
+        handle_exception_in_l1(t->vector);
+
+        if (t->uninit_test)
+            t->uninit_test();
+
+        vmcb_ident(vmcb);
+    }
+}
+
 struct svm_test svm_tests[] = {
     { "null", default_supported, default_prepare,
       default_prepare_gif_clear, null_test,
@@ -3196,5 +3380,6 @@  struct svm_test svm_tests[] = {
     TEST(svm_nm_test),
     TEST(svm_int3_test),
     TEST(svm_into_test),
+    TEST(svm_exception_test),
     { NULL, NULL, NULL, NULL, NULL, NULL, NULL }
 };