[v2] KVM: VMX: Remove unneeded __ASM_SIZE usage with POP instruction

Message ID 20200427205035.1594232-1-ubizjak@gmail.com (mailing list archive)
State New, archived
Series [v2] KVM: VMX: Remove unneeded __ASM_SIZE usage with POP instruction

Commit Message

Uros Bizjak April 27, 2020, 8:50 p.m. UTC
POP [mem] defaults to the word size, and the only legal non-default
size is 16 bits; e.g. a 32-bit POP will #UD in 64-bit mode and vice
versa. There is no need to use the __ASM_SIZE macro to force the
operand size.

Changes since v1:
- Fix commit message.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
---
 arch/x86/kvm/vmx/vmenter.S | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
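
For background, __ASM_SIZE() (from arch/x86/include/asm/asm.h) essentially
appends an "l" or "q" suffix to the mnemonic depending on whether the kernel
is built 32-bit or 64-bit. For a POP with a memory operand that suffix is
redundant, since the instruction already defaults to the native word size.
A minimal standalone sketch of this, not part of the patch (the file name
and build commands below are illustrative only):

/*
 * pop_demo.S - assembles cleanly with GNU as; inspect with objdump to
 * confirm that a bare POP with a memory operand already uses the native
 * word size.
 *
 * Build and inspect (64-bit):
 *   as --64 pop_demo.S -o pop_demo.o && objdump -d pop_demo.o
 */
	.text
	.globl	pop_demo
pop_demo:
	pop	8(%rax)		/* no suffix: encoded as 8f 40 08, a 64-bit pop */
	popq	8(%rax)		/* explicit "q" suffix: identical encoding */
	/*
	 * "popl 8(%rax)" would be rejected by the assembler here; a 32-bit
	 * POP is not encodable in 64-bit mode, matching the #UD note in the
	 * commit message.
	 */
	ret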

Comments

Paolo Bonzini May 4, 2020, 4:52 p.m. UTC | #1
On 27/04/20 22:50, Uros Bizjak wrote:
> POP [mem] defaults to the word size, and the only legal non-default
> size is 16 bits; e.g. a 32-bit POP will #UD in 64-bit mode and vice
> versa. There is no need to use the __ASM_SIZE macro to force the
> operand size.
> 
> Changes since v1:
> - Fix commit message.
> 
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Sean Christopherson <sean.j.christopherson@intel.com>
> Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> ---
>  arch/x86/kvm/vmx/vmenter.S | 14 +++++++-------
>  1 file changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> index 87f3f24fef37..94b8794bdd2a 100644
> --- a/arch/x86/kvm/vmx/vmenter.S
> +++ b/arch/x86/kvm/vmx/vmenter.S
> @@ -163,13 +163,13 @@ SYM_FUNC_START(__vmx_vcpu_run)
>  	mov WORD_SIZE(%_ASM_SP), %_ASM_AX
>  
>  	/* Save all guest registers, including RAX from the stack */
> -	__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
> -	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
> -	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
> -	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
> -	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
> -	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
> -	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
> +	pop           VCPU_RAX(%_ASM_AX)
> +	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
> +	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
> +	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
> +	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
> +	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
> +	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
>  #ifdef CONFIG_X86_64
>  	mov %r8,  VCPU_R8 (%_ASM_AX)
>  	mov %r9,  VCPU_R9 (%_ASM_AX)
> 

Queued, thanks.

Paolo

Patch

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 87f3f24fef37..94b8794bdd2a 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -163,13 +163,13 @@  SYM_FUNC_START(__vmx_vcpu_run)
 	mov WORD_SIZE(%_ASM_SP), %_ASM_AX
 
 	/* Save all guest registers, including RAX from the stack */
-	__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
-	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
-	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
-	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
-	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
-	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
-	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
+	pop           VCPU_RAX(%_ASM_AX)
+	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
+	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
+	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
+	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
+	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
+	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
 #ifdef CONFIG_X86_64
 	mov %r8,  VCPU_R8 (%_ASM_AX)
 	mov %r9,  VCPU_R9 (%_ASM_AX)