diff mbox series

[kvm-unit-tests] Update AMD instructions to conform to LLVM assembler

Message ID 20190220202442.145494-1-pshier@google.com (mailing list archive)
State New, archived
Headers show
Series [kvm-unit-tests] Update AMD instructions to conform to LLVM assembler | expand

Commit Message

Peter Shier Feb. 20, 2019, 8:24 p.m. UTC
The GNU assembler (gas) allows omitting operands where there is only a
single choice, e.g. "VMRUN rAX". The LLVM assembler requires those operands
even though they are the default and only choice.

In addition, LLVM does not allow a CLGI instruction with a terminating
\n\t. Adding a ; separator after the instruction is a workaround.

Signed-off-by: Peter Shier <pshier@google.com>
Reviewed-by: Marc Orr <marcorr@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 x86/svm.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

Comments

Jim Mattson Dec. 11, 2019, 8:22 p.m. UTC | #1
On Wed, Feb 20, 2019 at 12:25 PM Peter Shier <pshier@google.com> wrote:
>
> The GNU assembler (gas) allows omitting operands where there is only a
> single choice e.g. "VMRUN rAX".The LLVM assembler requires those operands
> even though they are the default and only choice.
>
> In addition, LLVM does not allow a CLGI instruction with a terminating
> \n\t. Adding a ; separator after the instruction is a workaround.
>
> Signed-off-by: Peter Shier <pshier@google.com>
> Reviewed-by: Marc Orr <marcorr@google.com>
> Reviewed-by: Jim Mattson <jmattson@google.com>
> ---
>  x86/svm.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/x86/svm.c b/x86/svm.c
> index bc74e7c690a8..e5cb730b08cb 100644
> --- a/x86/svm.c
> +++ b/x86/svm.c
> @@ -154,7 +154,7 @@ static void vmcb_ident(struct vmcb *vmcb)
>      struct descriptor_table_ptr desc_table_ptr;
>
>      memset(vmcb, 0, sizeof(*vmcb));
> -    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
> +    asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
>      vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
>      vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
>      vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
> @@ -262,20 +262,20 @@ static void test_run(struct test *test, struct vmcb *vmcb)
>      do {
>          tsc_start = rdtsc();
>          asm volatile (
> -            "clgi \n\t"
> -            "vmload \n\t"
> +            "clgi;\n\t" // semi-colon needed for LLVM compatibility
> +            "vmload %0\n\t"
>              "mov regs+0x80, %%r15\n\t"  // rflags
>              "mov %%r15, 0x170(%0)\n\t"
>              "mov regs, %%r15\n\t"       // rax
>              "mov %%r15, 0x1f8(%0)\n\t"
>              LOAD_GPR_C
> -            "vmrun \n\t"
> +            "vmrun %0\n\t"
>              SAVE_GPR_C
>              "mov 0x170(%0), %%r15\n\t"  // rflags
>              "mov %%r15, regs+0x80\n\t"
>              "mov 0x1f8(%0), %%r15\n\t"  // rax
>              "mov %%r15, regs\n\t"
> -            "vmsave \n\t"
> +            "vmsave %0\n\t"
>              "stgi"
>              : : "a"(vmcb_phys)
>              : "rbx", "rcx", "rdx", "rsi",
> @@ -330,7 +330,7 @@ static bool check_no_vmrun_int(struct test *test)
>
>  static void test_vmrun(struct test *test)
>  {
> -    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
> +    asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
>  }
>
>  static bool check_vmrun(struct test *test)
> @@ -1241,7 +1241,7 @@ static bool lat_svm_insn_finished(struct test *test)
>
>      for ( ; runs != 0; runs--) {
>          tsc_start = rdtsc();
> -        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
> +        asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
>          cycles = rdtsc() - tsc_start;
>          if (cycles > latvmload_max)
>              latvmload_max = cycles;
> @@ -1250,7 +1250,7 @@ static bool lat_svm_insn_finished(struct test *test)
>          vmload_sum += cycles;
>
>          tsc_start = rdtsc();
> -        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
> +        asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
>          cycles = rdtsc() - tsc_start;
>          if (cycles > latvmsave_max)
>              latvmsave_max = cycles;

Ping.
Paolo Bonzini Dec. 12, 2019, 12:40 a.m. UTC | #2
On 11/12/19 21:22, Jim Mattson wrote:
> On Wed, Feb 20, 2019 at 12:25 PM Peter Shier <pshier@google.com> wrote:
>>
>> The GNU assembler (gas) allows omitting operands where there is only a
>> single choice e.g. "VMRUN rAX".The LLVM assembler requires those operands
>> even though they are the default and only choice.
>>
>> In addition, LLVM does not allow a CLGI instruction with a terminating
>> \n\t. Adding a ; separator after the instruction is a workaround.
>>
>> Signed-off-by: Peter Shier <pshier@google.com>
>> Reviewed-by: Marc Orr <marcorr@google.com>
>> Reviewed-by: Jim Mattson <jmattson@google.com>
>> ---
>>  x86/svm.c | 16 ++++++++--------
>>  1 file changed, 8 insertions(+), 8 deletions(-)
>>
>> diff --git a/x86/svm.c b/x86/svm.c
>> index bc74e7c690a8..e5cb730b08cb 100644
>> --- a/x86/svm.c
>> +++ b/x86/svm.c
>> @@ -154,7 +154,7 @@ static void vmcb_ident(struct vmcb *vmcb)
>>      struct descriptor_table_ptr desc_table_ptr;
>>
>>      memset(vmcb, 0, sizeof(*vmcb));
>> -    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
>> +    asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
>>      vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
>>      vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
>>      vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
>> @@ -262,20 +262,20 @@ static void test_run(struct test *test, struct vmcb *vmcb)
>>      do {
>>          tsc_start = rdtsc();
>>          asm volatile (
>> -            "clgi \n\t"
>> -            "vmload \n\t"
>> +            "clgi;\n\t" // semi-colon needed for LLVM compatibility
>> +            "vmload %0\n\t"
>>              "mov regs+0x80, %%r15\n\t"  // rflags
>>              "mov %%r15, 0x170(%0)\n\t"
>>              "mov regs, %%r15\n\t"       // rax
>>              "mov %%r15, 0x1f8(%0)\n\t"
>>              LOAD_GPR_C
>> -            "vmrun \n\t"
>> +            "vmrun %0\n\t"
>>              SAVE_GPR_C
>>              "mov 0x170(%0), %%r15\n\t"  // rflags
>>              "mov %%r15, regs+0x80\n\t"
>>              "mov 0x1f8(%0), %%r15\n\t"  // rax
>>              "mov %%r15, regs\n\t"
>> -            "vmsave \n\t"
>> +            "vmsave %0\n\t"
>>              "stgi"
>>              : : "a"(vmcb_phys)
>>              : "rbx", "rcx", "rdx", "rsi",
>> @@ -330,7 +330,7 @@ static bool check_no_vmrun_int(struct test *test)
>>
>>  static void test_vmrun(struct test *test)
>>  {
>> -    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
>> +    asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
>>  }
>>
>>  static bool check_vmrun(struct test *test)
>> @@ -1241,7 +1241,7 @@ static bool lat_svm_insn_finished(struct test *test)
>>
>>      for ( ; runs != 0; runs--) {
>>          tsc_start = rdtsc();
>> -        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
>> +        asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
>>          cycles = rdtsc() - tsc_start;
>>          if (cycles > latvmload_max)
>>              latvmload_max = cycles;
>> @@ -1250,7 +1250,7 @@ static bool lat_svm_insn_finished(struct test *test)
>>          vmload_sum += cycles;
>>
>>          tsc_start = rdtsc();
>> -        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
>> +        asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
>>          cycles = rdtsc() - tsc_start;
>>          if (cycles > latvmsave_max)
>>              latvmsave_max = cycles;
> 
> Ping.
> 

I am applying it, but I'm seriously puzzled by the clgi one.  Can you
open a bug on LLVM?

Paolo
Fangrui Song Dec. 14, 2019, 7:05 p.m. UTC | #3
> I am applying it, but I'm seriously puzzled by the clgi one.  Can you
> open a bug on LLVM?
> 
> Paolo

The clgi change does not appear to be needed.

- "clgi \n\t"
+ "clgi;\n\t"

clang 7.0.0, 8.0.0 (downloaded from releases.llvm.org) and HEAD (built from source) compile
"clgi" fine.

% ~/ccls/build/clang+llvm-7.0.0-x86_64-linux-gnu-ubuntu-16.04/bin/clang -mno-red-zone -mno-sse -mno-sse2 -m64 -O1 -g -MMD -MF x86/.emulator.d -Wall -Wwrite-strings -Wempty-body -Wuninitialized -Wignored-qualifiers -fno-omit-frame-pointer -fno-pic -Woverride-init -Wmissing-prototypes -Wstrict-prototypes -std=gnu99 -ffreestanding -I /tmp/p/kvm-unit-tests/lib -I /tmp/p/kvm-unit-tests/lib/x86 -I lib -c -o x86/svm.o x86/svm.c -w
# succeeded
Paolo Bonzini Dec. 18, 2019, 5:33 p.m. UTC | #4
On 11/12/19 21:22, Jim Mattson wrote:
> On Wed, Feb 20, 2019 at 12:25 PM Peter Shier <pshier@google.com> wrote:
>>
>> The GNU assembler (gas) allows omitting operands where there is only a
>> single choice e.g. "VMRUN rAX".The LLVM assembler requires those operands
>> even though they are the default and only choice.
>>
>> In addition, LLVM does not allow a CLGI instruction with a terminating
>> \n\t. Adding a ; separator after the instruction is a workaround.
>>
>> Signed-off-by: Peter Shier <pshier@google.com>
>> Reviewed-by: Marc Orr <marcorr@google.com>
>> Reviewed-by: Jim Mattson <jmattson@google.com>
>> ---
>>  x86/svm.c | 16 ++++++++--------
>>  1 file changed, 8 insertions(+), 8 deletions(-)
>>
>> diff --git a/x86/svm.c b/x86/svm.c
>> index bc74e7c690a8..e5cb730b08cb 100644
>> --- a/x86/svm.c
>> +++ b/x86/svm.c
>> @@ -154,7 +154,7 @@ static void vmcb_ident(struct vmcb *vmcb)
>>      struct descriptor_table_ptr desc_table_ptr;
>>
>>      memset(vmcb, 0, sizeof(*vmcb));
>> -    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
>> +    asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
>>      vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
>>      vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
>>      vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
>> @@ -262,20 +262,20 @@ static void test_run(struct test *test, struct vmcb *vmcb)
>>      do {
>>          tsc_start = rdtsc();
>>          asm volatile (
>> -            "clgi \n\t"
>> -            "vmload \n\t"
>> +            "clgi;\n\t" // semi-colon needed for LLVM compatibility
>> +            "vmload %0\n\t"
>>              "mov regs+0x80, %%r15\n\t"  // rflags
>>              "mov %%r15, 0x170(%0)\n\t"
>>              "mov regs, %%r15\n\t"       // rax
>>              "mov %%r15, 0x1f8(%0)\n\t"
>>              LOAD_GPR_C
>> -            "vmrun \n\t"
>> +            "vmrun %0\n\t"
>>              SAVE_GPR_C
>>              "mov 0x170(%0), %%r15\n\t"  // rflags
>>              "mov %%r15, regs+0x80\n\t"
>>              "mov 0x1f8(%0), %%r15\n\t"  // rax
>>              "mov %%r15, regs\n\t"
>> -            "vmsave \n\t"
>> +            "vmsave %0\n\t"
>>              "stgi"
>>              : : "a"(vmcb_phys)
>>              : "rbx", "rcx", "rdx", "rsi",
>> @@ -330,7 +330,7 @@ static bool check_no_vmrun_int(struct test *test)
>>
>>  static void test_vmrun(struct test *test)
>>  {
>> -    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
>> +    asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
>>  }
>>
>>  static bool check_vmrun(struct test *test)
>> @@ -1241,7 +1241,7 @@ static bool lat_svm_insn_finished(struct test *test)
>>
>>      for ( ; runs != 0; runs--) {
>>          tsc_start = rdtsc();
>> -        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
>> +        asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
>>          cycles = rdtsc() - tsc_start;
>>          if (cycles > latvmload_max)
>>              latvmload_max = cycles;
>> @@ -1250,7 +1250,7 @@ static bool lat_svm_insn_finished(struct test *test)
>>          vmload_sum += cycles;
>>
>>          tsc_start = rdtsc();
>> -        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
>> +        asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
>>          cycles = rdtsc() - tsc_start;
>>          if (cycles > latvmsave_max)
>>              latvmsave_max = cycles;
> 
> Ping.
> 

Applied.

Paolo
diff mbox series

Patch

diff --git a/x86/svm.c b/x86/svm.c
index bc74e7c690a8..e5cb730b08cb 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -154,7 +154,7 @@  static void vmcb_ident(struct vmcb *vmcb)
     struct descriptor_table_ptr desc_table_ptr;
 
     memset(vmcb, 0, sizeof(*vmcb));
-    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
+    asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
@@ -262,20 +262,20 @@  static void test_run(struct test *test, struct vmcb *vmcb)
     do {
         tsc_start = rdtsc();
         asm volatile (
-            "clgi \n\t"
-            "vmload \n\t"
+            "clgi;\n\t" // semi-colon needed for LLVM compatibility
+            "vmload %0\n\t"
             "mov regs+0x80, %%r15\n\t"  // rflags
             "mov %%r15, 0x170(%0)\n\t"
             "mov regs, %%r15\n\t"       // rax
             "mov %%r15, 0x1f8(%0)\n\t"
             LOAD_GPR_C
-            "vmrun \n\t"
+            "vmrun %0\n\t"
             SAVE_GPR_C
             "mov 0x170(%0), %%r15\n\t"  // rflags
             "mov %%r15, regs+0x80\n\t"
             "mov 0x1f8(%0), %%r15\n\t"  // rax
             "mov %%r15, regs\n\t"
-            "vmsave \n\t"
+            "vmsave %0\n\t"
             "stgi"
             : : "a"(vmcb_phys)
             : "rbx", "rcx", "rdx", "rsi",
@@ -330,7 +330,7 @@  static bool check_no_vmrun_int(struct test *test)
 
 static void test_vmrun(struct test *test)
 {
-    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
+    asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
 }
 
 static bool check_vmrun(struct test *test)
@@ -1241,7 +1241,7 @@  static bool lat_svm_insn_finished(struct test *test)
 
     for ( ; runs != 0; runs--) {
         tsc_start = rdtsc();
-        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
+        asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
         cycles = rdtsc() - tsc_start;
         if (cycles > latvmload_max)
             latvmload_max = cycles;
@@ -1250,7 +1250,7 @@  static bool lat_svm_insn_finished(struct test *test)
         vmload_sum += cycles;
 
         tsc_start = rdtsc();
-        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
+        asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
         cycles = rdtsc() - tsc_start;
         if (cycles > latvmsave_max)
             latvmsave_max = cycles;