From patchwork Fri Jan 18 21:20:35 2019
X-Patchwork-Submitter: Sean Christopherson
X-Patchwork-Id: 10771545
From: Sean Christopherson
To: Paolo Bonzini, Radim Krčmář
Cc: kvm@vger.kernel.org, Andi Kleen, Josh Poimboeuf
Subject: [PATCH 27/29] KVM: VMX: Use RAX as the scratch register during vCPU-run
Date: Fri, 18 Jan 2019 13:20:35 -0800
Message-Id: <20190118212037.24412-28-sean.j.christopherson@intel.com>
In-Reply-To: <20190118212037.24412-1-sean.j.christopherson@intel.com>
References: <20190118212037.24412-1-sean.j.christopherson@intel.com>

Prepare for making __vmx_vcpu_run() callable from C code.  That means
returning the result in RAX.  Since RAX will be used to return the
result, use it as the scratch register as well to make the code
readable and to document that the scratch register is more or less
arbitrary.

Signed-off-by: Sean Christopherson
---
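As an aside on the calling convention at play here: under the x86-64
ABI an integer or bool return value travels in RAX/EAX, so an assembly
routine that wants to be callable from C has to leave its result there.
Below is a minimal user-space sketch of that behavior; it is
illustrative only, not kernel code, and vmfail_demo() is a made-up name
loosely mimicking the VM-Fail path in the patch that follows.

#include <stdio.h>

/*
 * Illustrative only, not part of the patch: EAX is first used as an
 * arbitrary scratch register, then the stale scratch value is cleared,
 * and the final result is placed in EAX because the System V x86-64
 * ABI returns integer values in RAX/EAX.
 */
static int vmfail_demo(void)
{
	int ret;

	asm("mov $0xdead, %%eax\n\t"	/* scratch use of EAX */
	    "xor %%eax, %%eax\n\t"	/* clear the stale scratch value */
	    "mov $1, %%eax"		/* result returned in EAX, per the ABI */
	    : "=a" (ret));
	return ret;
}

int main(void)
{
	printf("vmfail_demo() returned %d\n", vmfail_demo());	/* prints 1 */
	return 0;
}
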
 arch/x86/kvm/vmx/vmenter.S | 82 +++++++++++++++++++-------------------
 1 file changed, 41 insertions(+), 41 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 0a0d6f2c1e8c..45e3e381d41d 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -102,31 +102,31 @@ ENTRY(__vmx_vcpu_run)
 	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
 	call vmx_update_host_rsp
 
-	/* Load @regs to RCX. */
-	mov (%_ASM_SP), %_ASM_CX
+	/* Load @regs to RAX. */
+	mov (%_ASM_SP), %_ASM_AX
 
 	/* Check if vmlaunch or vmresume is needed */
 	cmpb $0, %bl
 
 	/* Load guest registers.  Don't clobber flags. */
-	mov VCPU_RAX(%_ASM_CX), %_ASM_AX
-	mov VCPU_RBX(%_ASM_CX), %_ASM_BX
-	mov VCPU_RDX(%_ASM_CX), %_ASM_DX
-	mov VCPU_RSI(%_ASM_CX), %_ASM_SI
-	mov VCPU_RDI(%_ASM_CX), %_ASM_DI
-	mov VCPU_RBP(%_ASM_CX), %_ASM_BP
+	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
+	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
+	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
+	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
+	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
+	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
 #ifdef CONFIG_X86_64
-	mov VCPU_R8 (%_ASM_CX),  %r8
-	mov VCPU_R9 (%_ASM_CX),  %r9
-	mov VCPU_R10(%_ASM_CX), %r10
-	mov VCPU_R11(%_ASM_CX), %r11
-	mov VCPU_R12(%_ASM_CX), %r12
-	mov VCPU_R13(%_ASM_CX), %r13
-	mov VCPU_R14(%_ASM_CX), %r14
-	mov VCPU_R15(%_ASM_CX), %r15
+	mov VCPU_R8 (%_ASM_AX),  %r8
+	mov VCPU_R9 (%_ASM_AX),  %r9
+	mov VCPU_R10(%_ASM_AX), %r10
+	mov VCPU_R11(%_ASM_AX), %r11
+	mov VCPU_R12(%_ASM_AX), %r12
+	mov VCPU_R13(%_ASM_AX), %r13
+	mov VCPU_R14(%_ASM_AX), %r14
+	mov VCPU_R15(%_ASM_AX), %r15
 #endif
-	/* Load guest RCX.  This kills the vmx_vcpu pointer! */
-	mov VCPU_RCX(%_ASM_CX), %_ASM_CX
+	/* Load guest RAX.  This kills the vmx_vcpu pointer! */
+	mov VCPU_RAX(%_ASM_AX), %_ASM_AX
 
 	/* Enter guest mode */
 	call vmx_vmenter
@@ -134,29 +134,29 @@ ENTRY(__vmx_vcpu_run)
 	/* Jump on VM-Fail. */
 	jbe 2f
 
-	/* Temporarily save guest's RCX. */
-	push %_ASM_CX
+	/* Temporarily save guest's RAX. */
+	push %_ASM_AX
 
-	/* Reload @regs to RCX. */
-	mov WORD_SIZE(%_ASM_SP), %_ASM_CX
+	/* Reload @regs to RAX. */
+	mov WORD_SIZE(%_ASM_SP), %_ASM_AX
 
-	/* Save all guest registers, including RCX from the stack */
-	mov %_ASM_AX,   VCPU_RAX(%_ASM_CX)
-	mov %_ASM_BX,   VCPU_RBX(%_ASM_CX)
-	__ASM_SIZE(pop) VCPU_RCX(%_ASM_CX)
-	mov %_ASM_DX,   VCPU_RDX(%_ASM_CX)
-	mov %_ASM_SI,   VCPU_RSI(%_ASM_CX)
-	mov %_ASM_DI,   VCPU_RDI(%_ASM_CX)
-	mov %_ASM_BP,   VCPU_RBP(%_ASM_CX)
+	/* Save all guest registers, including RAX from the stack */
+	__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
+	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
+	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
+	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
+	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
+	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
+	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
 #ifdef CONFIG_X86_64
-	mov %r8,  VCPU_R8 (%_ASM_CX)
-	mov %r9,  VCPU_R9 (%_ASM_CX)
-	mov %r10, VCPU_R10(%_ASM_CX)
-	mov %r11, VCPU_R11(%_ASM_CX)
-	mov %r12, VCPU_R12(%_ASM_CX)
-	mov %r13, VCPU_R13(%_ASM_CX)
-	mov %r14, VCPU_R14(%_ASM_CX)
-	mov %r15, VCPU_R15(%_ASM_CX)
+	mov %r8,  VCPU_R8 (%_ASM_AX)
+	mov %r9,  VCPU_R9 (%_ASM_AX)
+	mov %r10, VCPU_R10(%_ASM_AX)
+	mov %r11, VCPU_R11(%_ASM_AX)
+	mov %r12, VCPU_R12(%_ASM_AX)
+	mov %r13, VCPU_R13(%_ASM_AX)
+	mov %r14, VCPU_R14(%_ASM_AX)
+	mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
 	/* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
@@ -177,7 +177,7 @@ ENTRY(__vmx_vcpu_run)
 	xor %r14d, %r14d
 	xor %r15d, %r15d
 #endif
-	xor %eax, %eax
+	xor %ecx, %ecx
 	xor %edx, %edx
 	xor %esi, %esi
 	xor %edi, %edi
@@ -190,9 +190,9 @@ ENTRY(__vmx_vcpu_run)
 	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
 2:	mov $1, %ebx
 	/*
-	 * RCX holds a guest value and it's not cleared in the common
+	 * RAX holds a guest value and it's not cleared in the common
 	 * exit path as VM-Exit reloads it with the vcpu_vmx pointer.
 	 */
-	xor %ecx, %ecx
+	xor %eax, %eax
 	jmp 1b
 ENDPROC(__vmx_vcpu_run)
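
For reference, this is the shape of the C-callable interface the
changelog is preparing for.  The actual prototype is introduced by a
later patch in the series, so treat the exact parameter list below as
an assumption rather than the final signature:

	/* Sketch only; names and parameters are assumptions. */
	bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
			    bool launched);

A bool return value occupies AL, the low byte of RAX, under the x86-64
calling convention, which is why the scratch/result work moves from RCX
to RAX here.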