
[6/5] x86/ELF: drop unnecessary volatile from asm()-s in elf_core_save_regs()

Message ID 06b8dedb-49d9-eeb6-d56a-c7852486d22e@suse.com (mailing list archive)
State New, archived
Series x86: introduce read_sregs() and elf_core_save_regs() adjustments

Commit Message

Jan Beulich Sept. 28, 2020, 3:04 p.m. UTC
There are no hidden side effects here.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
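
The rationale rests on the documented GCC semantics for inline assembly: an
asm() with at least one output operand and no volatile qualifier is treated
like an ordinary computation of its outputs, so the compiler may reorder it or
drop it entirely if the outputs go unused, whereas asm volatile() must always
be emitted because hidden side effects are assumed. In elf_core_save_regs()
every output is stored into the passed structure, so the weaker form is
sufficient. A minimal sketch of the distinction (function and variable names
are illustrative only, not part of the patch):

    static inline unsigned long current_rsp(void)
    {
        unsigned long val;

        /* Plain asm(): no hidden side effects, so the compiler may elide
         * this statement entirely if 'val' is never used. */
        asm ( "movq %%rsp, %0" : "=m" (val) );

        return val;
    }

    static inline void full_barrier(void)
    {
        /* asm volatile() with a memory clobber: must stay in place even
         * though it has no outputs the compiler could track. */
        asm volatile ( "mfence" ::: "memory" );
    }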

Comments

Andrew Cooper Sept. 28, 2020, 3:15 p.m. UTC | #1
On 28/09/2020 16:04, Jan Beulich wrote:
> There are no hidden side effects here.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: New.
>
> --- a/xen/include/asm-x86/x86_64/elf.h
> +++ b/xen/include/asm-x86/x86_64/elf.h
> @@ -37,26 +37,26 @@ typedef struct {
>  static inline void elf_core_save_regs(ELF_Gregset *core_regs, 
>                                        crash_xen_core_t *xen_core_regs)
>  {
> -    asm volatile("movq %%r15,%0" : "=m"(core_regs->r15));
> -    asm volatile("movq %%r14,%0" : "=m"(core_regs->r14));
> -    asm volatile("movq %%r13,%0" : "=m"(core_regs->r13));
> -    asm volatile("movq %%r12,%0" : "=m"(core_regs->r12));
> -    asm volatile("movq %%rbp,%0" : "=m"(core_regs->rbp));
> -    asm volatile("movq %%rbx,%0" : "=m"(core_regs->rbx));
> -    asm volatile("movq %%r11,%0" : "=m"(core_regs->r11));
> -    asm volatile("movq %%r10,%0" : "=m"(core_regs->r10));
> -    asm volatile("movq %%r9,%0" : "=m"(core_regs->r9));
> -    asm volatile("movq %%r8,%0" : "=m"(core_regs->r8));
> -    asm volatile("movq %%rax,%0" : "=m"(core_regs->rax));
> -    asm volatile("movq %%rcx,%0" : "=m"(core_regs->rcx));
> -    asm volatile("movq %%rdx,%0" : "=m"(core_regs->rdx));
> -    asm volatile("movq %%rsi,%0" : "=m"(core_regs->rsi));
> -    asm volatile("movq %%rdi,%0" : "=m"(core_regs->rdi));
> +    asm ( "movq %%r15,%0" : "=m" (core_regs->r15) );
> +    asm ( "movq %%r14,%0" : "=m" (core_regs->r14) );
> +    asm ( "movq %%r13,%0" : "=m" (core_regs->r13) );
> +    asm ( "movq %%r12,%0" : "=m" (core_regs->r12) );
> +    asm ( "movq %%rbp,%0" : "=m" (core_regs->rbp) );
> +    asm ( "movq %%rbx,%0" : "=m" (core_regs->rbx) );
> +    asm ( "movq %%r11,%0" : "=m" (core_regs->r11) );
> +    asm ( "movq %%r10,%0" : "=m" (core_regs->r10) );
> +    asm ( "movq %%r9,%0" : "=m" (core_regs->r9) );
> +    asm ( "movq %%r8,%0" : "=m" (core_regs->r8) );

Any chance we can align these seeing as they're changing?

What about spaces before %0 ?

Either way, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

> +    asm ( "movq %%rax,%0" : "=m" (core_regs->rax) );
> +    asm ( "movq %%rcx,%0" : "=m" (core_regs->rcx) );
> +    asm ( "movq %%rdx,%0" : "=m" (core_regs->rdx) );
> +    asm ( "movq %%rsi,%0" : "=m" (core_regs->rsi) );
> +    asm ( "movq %%rdi,%0" : "=m" (core_regs->rdi) );
>      /* orig_rax not filled in for now */
>      asm ( "call 0f; 0: popq %0" : "=m" (core_regs->rip) );
>      core_regs->cs = read_sreg(cs);
> -    asm volatile("pushfq; popq %0" :"=m"(core_regs->rflags));
> -    asm volatile("movq %%rsp,%0" : "=m"(core_regs->rsp));
> +    asm ( "pushfq; popq %0" : "=m" (core_regs->rflags) );
> +    asm ( "movq %%rsp,%0" : "=m" (core_regs->rsp) );
>      core_regs->ss = read_sreg(ss);
>      rdmsrl(MSR_FS_BASE, core_regs->thread_fs);
>      rdmsrl(MSR_GS_BASE, core_regs->thread_gs);
>
Jan Beulich Sept. 28, 2020, 3:40 p.m. UTC | #2
On 28.09.2020 17:15, Andrew Cooper wrote:
> On 28/09/2020 16:04, Jan Beulich wrote:
>> There are no hidden side effects here.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> v2: New.
>>
>> --- a/xen/include/asm-x86/x86_64/elf.h
>> +++ b/xen/include/asm-x86/x86_64/elf.h
>> @@ -37,26 +37,26 @@ typedef struct {
>>  static inline void elf_core_save_regs(ELF_Gregset *core_regs, 
>>                                        crash_xen_core_t *xen_core_regs)
>>  {
>> -    asm volatile("movq %%r15,%0" : "=m"(core_regs->r15));
>> -    asm volatile("movq %%r14,%0" : "=m"(core_regs->r14));
>> -    asm volatile("movq %%r13,%0" : "=m"(core_regs->r13));
>> -    asm volatile("movq %%r12,%0" : "=m"(core_regs->r12));
>> -    asm volatile("movq %%rbp,%0" : "=m"(core_regs->rbp));
>> -    asm volatile("movq %%rbx,%0" : "=m"(core_regs->rbx));
>> -    asm volatile("movq %%r11,%0" : "=m"(core_regs->r11));
>> -    asm volatile("movq %%r10,%0" : "=m"(core_regs->r10));
>> -    asm volatile("movq %%r9,%0" : "=m"(core_regs->r9));
>> -    asm volatile("movq %%r8,%0" : "=m"(core_regs->r8));
>> -    asm volatile("movq %%rax,%0" : "=m"(core_regs->rax));
>> -    asm volatile("movq %%rcx,%0" : "=m"(core_regs->rcx));
>> -    asm volatile("movq %%rdx,%0" : "=m"(core_regs->rdx));
>> -    asm volatile("movq %%rsi,%0" : "=m"(core_regs->rsi));
>> -    asm volatile("movq %%rdi,%0" : "=m"(core_regs->rdi));
>> +    asm ( "movq %%r15,%0" : "=m" (core_regs->r15) );
>> +    asm ( "movq %%r14,%0" : "=m" (core_regs->r14) );
>> +    asm ( "movq %%r13,%0" : "=m" (core_regs->r13) );
>> +    asm ( "movq %%r12,%0" : "=m" (core_regs->r12) );
>> +    asm ( "movq %%rbp,%0" : "=m" (core_regs->rbp) );
>> +    asm ( "movq %%rbx,%0" : "=m" (core_regs->rbx) );
>> +    asm ( "movq %%r11,%0" : "=m" (core_regs->r11) );
>> +    asm ( "movq %%r10,%0" : "=m" (core_regs->r10) );
>> +    asm ( "movq %%r9,%0" : "=m" (core_regs->r9) );
>> +    asm ( "movq %%r8,%0" : "=m" (core_regs->r8) );
> 
> Any chance we can align these seeing as they're changing?

I wasn't really sure about this - alignment to cover for the
difference between r8 and r9 vs r10-r15 never comes out nicely,
as the padding should really be in the number part of the
names. I'd prefer to leave it as is, while ...

> What about spaces before %0 ?

... I certainly will add these (as I should have noticed their
lack myself).

> Either way, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Thanks.

Jan
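
For concreteness: the space before %0 that Andrew asked about (and Jan agreed
to add) would turn the lines into something like the sketch below, while the
register-name alignment is being left alone because the one- and two-digit
names (r8/r9 vs. r10..r15) cannot be padded without the extra blank landing
awkwardly inside or right after the name. This is a sketch of the intended
follow-up only; the diff below is the v2 as posted, without the extra spaces.

    asm ( "movq %%r15, %0" : "=m" (core_regs->r15) );
    asm ( "movq %%r10, %0" : "=m" (core_regs->r10) );
    asm ( "movq %%r9, %0" : "=m" (core_regs->r9) );
    asm ( "movq %%r8, %0" : "=m" (core_regs->r8) );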

Patch

--- a/xen/include/asm-x86/x86_64/elf.h
+++ b/xen/include/asm-x86/x86_64/elf.h
@@ -37,26 +37,26 @@  typedef struct {
 static inline void elf_core_save_regs(ELF_Gregset *core_regs, 
                                       crash_xen_core_t *xen_core_regs)
 {
-    asm volatile("movq %%r15,%0" : "=m"(core_regs->r15));
-    asm volatile("movq %%r14,%0" : "=m"(core_regs->r14));
-    asm volatile("movq %%r13,%0" : "=m"(core_regs->r13));
-    asm volatile("movq %%r12,%0" : "=m"(core_regs->r12));
-    asm volatile("movq %%rbp,%0" : "=m"(core_regs->rbp));
-    asm volatile("movq %%rbx,%0" : "=m"(core_regs->rbx));
-    asm volatile("movq %%r11,%0" : "=m"(core_regs->r11));
-    asm volatile("movq %%r10,%0" : "=m"(core_regs->r10));
-    asm volatile("movq %%r9,%0" : "=m"(core_regs->r9));
-    asm volatile("movq %%r8,%0" : "=m"(core_regs->r8));
-    asm volatile("movq %%rax,%0" : "=m"(core_regs->rax));
-    asm volatile("movq %%rcx,%0" : "=m"(core_regs->rcx));
-    asm volatile("movq %%rdx,%0" : "=m"(core_regs->rdx));
-    asm volatile("movq %%rsi,%0" : "=m"(core_regs->rsi));
-    asm volatile("movq %%rdi,%0" : "=m"(core_regs->rdi));
+    asm ( "movq %%r15,%0" : "=m" (core_regs->r15) );
+    asm ( "movq %%r14,%0" : "=m" (core_regs->r14) );
+    asm ( "movq %%r13,%0" : "=m" (core_regs->r13) );
+    asm ( "movq %%r12,%0" : "=m" (core_regs->r12) );
+    asm ( "movq %%rbp,%0" : "=m" (core_regs->rbp) );
+    asm ( "movq %%rbx,%0" : "=m" (core_regs->rbx) );
+    asm ( "movq %%r11,%0" : "=m" (core_regs->r11) );
+    asm ( "movq %%r10,%0" : "=m" (core_regs->r10) );
+    asm ( "movq %%r9,%0" : "=m" (core_regs->r9) );
+    asm ( "movq %%r8,%0" : "=m" (core_regs->r8) );
+    asm ( "movq %%rax,%0" : "=m" (core_regs->rax) );
+    asm ( "movq %%rcx,%0" : "=m" (core_regs->rcx) );
+    asm ( "movq %%rdx,%0" : "=m" (core_regs->rdx) );
+    asm ( "movq %%rsi,%0" : "=m" (core_regs->rsi) );
+    asm ( "movq %%rdi,%0" : "=m" (core_regs->rdi) );
     /* orig_rax not filled in for now */
     asm ( "call 0f; 0: popq %0" : "=m" (core_regs->rip) );
     core_regs->cs = read_sreg(cs);
-    asm volatile("pushfq; popq %0" :"=m"(core_regs->rflags));
-    asm volatile("movq %%rsp,%0" : "=m"(core_regs->rsp));
+    asm ( "pushfq; popq %0" : "=m" (core_regs->rflags) );
+    asm ( "movq %%rsp,%0" : "=m" (core_regs->rsp) );
     core_regs->ss = read_sreg(ss);
     rdmsrl(MSR_FS_BASE, core_regs->thread_fs);
     rdmsrl(MSR_GS_BASE, core_regs->thread_gs);
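
Two of the statements in the hunk use idioms worth spelling out. "call 0f; 0:
popq %0" captures the current instruction pointer: the call pushes its return
address, which is exactly the address of the local label 0, and the popq
immediately stores that address into the output operand. "pushfq; popq %0"
reads RFLAGS, which has no direct mov form and can only be transferred via the
stack. A standalone sketch (variable names hypothetical):

    unsigned long rip, rflags;

    /* The call pushes the address of label 0 as its return address;
     * popq then stores that address into 'rip'. */
    asm ( "call 0f; 0: popq %0" : "=m" (rip) );

    /* RFLAGS is read by pushing it and popping it into memory. */
    asm ( "pushfq; popq %0" : "=m" (rflags) );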