From patchwork Tue Jul 31 15:32:08 2018
X-Patchwork-Submitter: Sean Christopherson
X-Patchwork-Id: 10550951
From: Sean Christopherson
To: kvm@vger.kernel.org, pbonzini@redhat.com, rkrcmar@redhat.com
Cc: jmattson@google.com, Sean Christopherson
Subject: [PATCH 09/16] KVM: nVMX: assimilate nested_vmx_entry_failure() into
 nested_vmx_enter_non_root_mode()
Date: Tue, 31 Jul 2018 08:32:08 -0700
Message-Id: <20180731153215.31794-10-sean.j.christopherson@intel.com>
X-Mailer: git-send-email 2.18.0
In-Reply-To: <20180731153215.31794-1-sean.j.christopherson@intel.com>
References: <20180731153215.31794-1-sean.j.christopherson@intel.com>
X-Mailing-List: kvm@vger.kernel.org

In addition to consolidating code, removing nested_vmx_entry_failure()
eliminates a confusing function name and label.  For a VMEntry, "fail"
and its derivatives have a very specific meaning due to the different
behavior of a VMEnter VMFail versus a VMExit, i.e. a more appropriate
name for nested_vmx_entry_failure() would have been something like
nested_vmx_entry_consistency_check_vmexit().
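
For illustration only (not part of the patch): a minimal sketch of the two
distinct "failure" outcomes of a nested VMEnter that the above alludes to.
The helper names are hypothetical stand-ins for the real prereq/postreq
checks in vmx.c:

  #include <stdbool.h>

  /* Hypothetical stubs standing in for the real prereq/postreq checks. */
  static bool prereqs_ok(void)  { return true; }
  static bool postreqs_ok(void) { return true; }

  enum nested_vmenter_outcome {
          VMENTER_OK,      /* L2 is now running */
          VMENTER_VMFAIL,  /* VMFail: reported to L1 via RFLAGS, no VMExit occurs */
          VMENTER_VMEXIT,  /* consistency check VMExit: vmcs12's host state is
                            * loaded and vm_exit_reason is written with
                            * VMX_EXIT_REASONS_FAILED_VMENTRY set */
  };

  static enum nested_vmenter_outcome nested_vmenter_sketch(void)
  {
          if (!prereqs_ok())       /* checks done before loading guest state */
                  return VMENTER_VMFAIL;
          if (!postreqs_ok())      /* checks done while/after loading guest state */
                  return VMENTER_VMEXIT;
          return VMENTER_OK;
  }

nested_vmx_entry_failure() handled only the last case, hence the confusion
its name caused.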
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx.c | 63 ++++++++++++++++++----------------------------
 1 file changed, 25 insertions(+), 38 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c01ae6e7c323..52c29d12d0e8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1805,9 +1805,6 @@ static inline bool is_nmi(u32 intr_info)
 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 			      u32 exit_intr_info,
 			      unsigned long exit_qualification);
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
-				     struct vmcs12 *vmcs12,
-				     u32 reason, unsigned long qualification);
 
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
@@ -11765,24 +11762,23 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	return 0;
 }
 
+static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+				   struct vmcs12 *vmcs12);
+
 static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	bool is_vmentry = vmx->nested.nested_run_pending;
+	u32 exit_reason = EXIT_REASON_INVALID_STATE;
 	u32 msr_entry_idx;
 	u32 exit_qual;
-	int r;
 
 	if (!is_vmentry)
 		goto enter_non_root_mode;
 
-	r = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual);
-	if (r) {
-		nested_vmx_entry_failure(vcpu, vmcs12,
-					 EXIT_REASON_INVALID_STATE, exit_qual);
-		return 1;
-	}
+	if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+		goto consistency_check_vmexit;
 
 enter_non_root_mode:
 	enter_guest_mode(vcpu);
@@ -11796,13 +11792,12 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu)
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
-	r = EXIT_REASON_INVALID_STATE;
 	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
 		goto fail;
 
 	nested_get_vmcs12_pages(vcpu, vmcs12);
 
-	r = EXIT_REASON_MSR_LOAD_FAIL;
+	exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
 	msr_entry_idx = nested_vmx_load_msr(vcpu,
 					    vmcs12->vm_entry_msr_load_addr,
 					    vmcs12->vm_entry_msr_load_count);
@@ -11822,7 +11817,24 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu)
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	leave_guest_mode(vcpu);
 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-	nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
+
+	/*
+	 * A consistency check VMExit during L1's VMEnter to L2 is a subset
+	 * of a normal VMexit, as explained in 23.7 "VM-entry failures during
+	 * or after loading guest state" (this also lists the acceptable exit-
+	 * reason and exit-qualification parameters).
+	 */
+consistency_check_vmexit:
+	vm_entry_controls_reset_shadow(vmx);
+	vm_exit_controls_reset_shadow(vmx);
+	vmx_segment_cache_clear(vmx);
+
+	load_vmcs12_host_state(vcpu, vmcs12);
+	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+	vmcs12->exit_qualification = exit_qual;
+	nested_vmx_succeed(vcpu);
+	if (enable_shadow_vmcs)
+		vmx->nested.sync_shadow_vmcs = true;
 	return 1;
 }
 
@@ -12533,31 +12545,6 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu)
 	free_nested(to_vmx(vcpu));
 }
 
-/*
- * L1's failure to enter L2 is a subset of a normal exit, as explained in
- * 23.7 "VM-entry failures during or after loading guest state" (this also
- * lists the acceptable exit-reason and exit-qualification parameters).
- * It should only be called before L2 actually succeeded to run, and when
- * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
- */
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
-				     struct vmcs12 *vmcs12,
-				     u32 reason, unsigned long qualification)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-	vm_entry_controls_reset_shadow(vmx);
-	vm_exit_controls_reset_shadow(vmx);
-	vmx_segment_cache_clear(vmx);
-
-	load_vmcs12_host_state(vcpu, vmcs12);
-	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
-	vmcs12->exit_qualification = qualification;
-	nested_vmx_succeed(vcpu);
-	if (enable_shadow_vmcs)
-		vmx->nested.sync_shadow_vmcs = true;
-}
-
 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage)