Message ID | 20250401222105.79309-5-dmukhin@ford.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | x86/asm: cleanups after toolchain baseline upgrade | expand |
On 01/04/2025 11:21 pm, dmkhn@proton.me wrote: > diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h > index 10c0619108..1d63e49288 100644 > --- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h > +++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h > @@ -507,15 +487,17 @@ static inline int __vmxon(u64 addr) > int rc; > > asm volatile ( > - "1: " VMXON_OPCODE MODRM_EAX_06 "\n" > - " setna %b0 ; neg %0\n" /* CF==1 or ZF==1 --> rc = -1 */ > + "1: vmxon (%[addr])\n" > + " setna %b[rc]\n" > + " neg %[rc]\n" /* CF==1 or ZF==1 --> rc = -1 */ > "2:\n" > ".section .fixup,\"ax\"\n" > - "3: sub $2,%0 ; jmp 2b\n" /* #UD or #GP --> rc = -2 */ > + "3: mov $-2, %[rc]\n" > + " jmp 2b\n" /* #UD or #GP --> rc = -2 */ > ".previous\n" > _ASM_EXTABLE(1b, 3b) > - : "=q" (rc) > - : "0" (0), "a" (&addr) > + : [rc] "=q" (rc) > + : "0" (0), [addr] "r" (&addr) > : "memory"); > > return rc; A variant of this patch (improvements to __vmxon() helper, or whatever) probably wants pulling out and doing earlier. For the function parameter, u64 addr wants to become paddr_t addr. Use "int rc = 0;" and [rc] "+q" (rc). That takes away the "0" (0) that is otherwise unconnected. Next, "vmxon %[addr]" and [addr] "m" (addr). The VMXON instruction strictly takes an m64 operand, and it doesn't need bouncing through another register. Finally, __vmx{on,off}() have single callers only in vmcs.c, and really shouldn't be in vmx.h which is included ~everywhere. You can move them into vmcs.c (probably after parse_ept_param_runtime()) to limit their scope. ~Andrew
On 02.04.2025 00:21, dmkhn@proton.me wrote: > @@ -497,9 +479,7 @@ static inline void vpid_sync_all(void) > > static inline void __vmxoff(void) > { > - asm volatile ( > - VMXOFF_OPCODE > - : : : "memory" ); > + asm volatile ("vmxoff" : : : "memory"); Nit (style): Blanks immediately inside parentheses please. Ideally ... > @@ -507,15 +487,17 @@ static inline int __vmxon(u64 addr) > int rc; > > asm volatile ( > - "1: " VMXON_OPCODE MODRM_EAX_06 "\n" > - " setna %b0 ; neg %0\n" /* CF==1 or ZF==1 --> rc = -1 */ > + "1: vmxon (%[addr])\n" > + " setna %b[rc]\n" > + " neg %[rc]\n" /* CF==1 or ZF==1 --> rc = -1 */ > "2:\n" > ".section .fixup,\"ax\"\n" > - "3: sub $2,%0 ; jmp 2b\n" /* #UD or #GP --> rc = -2 */ > + "3: mov $-2, %[rc]\n" > + " jmp 2b\n" /* #UD or #GP --> rc = -2 */ > ".previous\n" > _ASM_EXTABLE(1b, 3b) > - : "=q" (rc) > - : "0" (0), "a" (&addr) > + : [rc] "=q" (rc) > + : "0" (0), [addr] "r" (&addr) > : "memory"); ... you'd also take the opportunity to add the missing one here. Then again Andrew eliminates this altogether anyway. Jan
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h index 10c0619108..1d63e49288 100644 --- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h +++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h @@ -257,24 +257,6 @@ typedef union cr_access_qual { #define X86_SEG_AR_GRANULARITY (1u << 15) /* 15, granularity */ #define X86_SEG_AR_SEG_UNUSABLE (1u << 16) /* 16, segment unusable */ -#define VMCALL_OPCODE ".byte 0x0f,0x01,0xc1\n" -#define VMCLEAR_OPCODE ".byte 0x66,0x0f,0xc7\n" /* reg/opcode: /6 */ -#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n" -#define VMPTRLD_OPCODE ".byte 0x0f,0xc7\n" /* reg/opcode: /6 */ -#define VMPTRST_OPCODE ".byte 0x0f,0xc7\n" /* reg/opcode: /7 */ -#define VMREAD_OPCODE ".byte 0x0f,0x78\n" -#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n" -#define VMWRITE_OPCODE ".byte 0x0f,0x79\n" -#define INVEPT_OPCODE ".byte 0x66,0x0f,0x38,0x80\n" /* m128,r64/32 */ -#define INVVPID_OPCODE ".byte 0x66,0x0f,0x38,0x81\n" /* m128,r64/32 */ -#define VMXOFF_OPCODE ".byte 0x0f,0x01,0xc4\n" -#define VMXON_OPCODE ".byte 0xf3,0x0f,0xc7\n" - -#define MODRM_EAX_08 ".byte 0x08\n" /* ECX, [EAX] */ -#define MODRM_EAX_06 ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */ -#define MODRM_EAX_07 ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */ -#define MODRM_EAX_ECX ".byte 0xc1\n" /* EAX, ECX */ - extern uint8_t posted_intr_vector; #define cpu_has_vmx_ept_exec_only_supported \ @@ -497,9 +479,7 @@ static inline void vpid_sync_all(void) static inline void __vmxoff(void) { - asm volatile ( - VMXOFF_OPCODE - : : : "memory" ); + asm volatile ("vmxoff" : : : "memory"); } static inline int __vmxon(u64 addr) @@ -507,15 +487,17 @@ static inline int __vmxon(u64 addr) int rc; asm volatile ( - "1: " VMXON_OPCODE MODRM_EAX_06 "\n" - " setna %b0 ; neg %0\n" /* CF==1 or ZF==1 --> rc = -1 */ + "1: vmxon (%[addr])\n" + " setna %b[rc]\n" + " neg %[rc]\n" /* CF==1 or ZF==1 --> rc = -1 */ "2:\n" ".section .fixup,\"ax\"\n" - "3: sub $2,%0 ; jmp 2b\n" /* #UD or #GP --> rc = -2 */ + "3: mov $-2, %[rc]\n" + " jmp 2b\n" /* #UD or #GP --> rc = -2 */ ".previous\n" _ASM_EXTABLE(1b, 3b) - : "=q" (rc) - : "0" (0), "a" (&addr) + : [rc] "=q" (rc) + : "0" (0), [addr] "r" (&addr) : "memory"); return rc;