From patchwork Thu Oct  8 05:57:26 2015
X-Patchwork-Submitter: Wanpeng Li
X-Patchwork-Id: 7349351
From: Wanpeng Li
To: Paolo Bonzini
Cc: Jan Kiszka, Bandan Das, Wincy Van, kvm@vger.kernel.org,
    linux-kernel@vger.kernel.org, Wanpeng Li
Subject: [PATCH v2 5/5] KVM: nVMX: expose VPID capability to L1
Date: Wed, 7 Oct 2015 22:57:26 -0700
X-Mailer: git-send-email 2.1.0
In-Reply-To: <1444283846-9964-1-git-send-email-wanpeng.li@hotmail.com>
References: <1444283846-9964-1-git-send-email-wanpeng.li@hotmail.com>
X-Mailing-List: kvm@vger.kernel.org

Expose VPID capability to L1. For nested guests, we don't do anything
specific for single context invalidation. Hence, only advertise support
for global context invalidation. The major benefit of nested VPID comes
from having separate VPIDs when switching between L1 and L2, and also
when L2's vCPUs are not scheduled in/out on L1.
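[Illustration only, not part of the commit message: a minimal sketch of how an
L1 hypervisor could probe the capability advertised by this patch. It assumes
the kernel's rdmsrl() helper, and the helper name l1_has_global_invvpid() is
made up for the example; the bit positions (bit 32 = INVVPID supported,
bit 42 = all-context invalidation) follow the Intel SDM layout of
IA32_VMX_EPT_VPID_CAP (MSR 0x48c).]

/*
 * Sketch only: check whether nested INVVPID with the global (all-context)
 * extent is advertised in MSR_IA32_VMX_EPT_VPID_CAP.
 */
static bool l1_has_global_invvpid(void)
{
	u64 ept_vpid_cap;

	rdmsrl(MSR_IA32_VMX_EPT_VPID_CAP, ept_vpid_cap);

	/* Bit 32: INVVPID supported; bit 42: all-context invalidation. */
	return (ept_vpid_cap & (1ULL << 32)) &&
	       (ept_vpid_cap & (1ULL << 42));
}

With this patch, both bits read as 1 in L1 whenever enable_vpid is set on L0,
since VMX_VPID_INVVPID_BIT and VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT are shifted
into the upper 32 bits of nested_vmx_ept_vpid_caps.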
Reviewed-by: Wincy Van
Signed-off-by: Wanpeng Li
---
 arch/x86/kvm/vmx.c | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 31d272e..22b4dc7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -442,7 +442,7 @@ struct nested_vmx {
 	u32 nested_vmx_true_entry_ctls_low;
 	u32 nested_vmx_misc_low;
 	u32 nested_vmx_misc_high;
-	u32 nested_vmx_ept_caps;
+	u64 nested_vmx_ept_vpid_caps;
 };
 
 #define POSTED_INTR_ON 0
@@ -2489,18 +2489,22 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		/* nested EPT: emulate EPT also to L1 */
 		vmx->nested.nested_vmx_secondary_ctls_high |=
 			SECONDARY_EXEC_ENABLE_EPT;
-		vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
+		vmx->nested.nested_vmx_ept_vpid_caps = VMX_EPT_PAGE_WALK_4_BIT |
 			 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
 			 VMX_EPT_INVEPT_BIT;
-		vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
+		vmx->nested.nested_vmx_ept_vpid_caps &= vmx_capability.ept;
 		/*
 		 * For nested guests, we don't do anything specific
 		 * for single context invalidation. Hence, only advertise
 		 * support for global context invalidation.
 		 */
-		vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
+		vmx->nested.nested_vmx_ept_vpid_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
 	} else
-		vmx->nested.nested_vmx_ept_caps = 0;
+		vmx->nested.nested_vmx_ept_vpid_caps = 0;
+
+	if (enable_vpid)
+		vmx->nested.nested_vmx_ept_vpid_caps |= (VMX_VPID_INVVPID_BIT |
+			VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT) << 32;
 
 	if (enable_unrestricted_guest)
 		vmx->nested.nested_vmx_secondary_ctls_high |=
@@ -2616,8 +2620,7 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 			vmx->nested.nested_vmx_secondary_ctls_high);
 		break;
 	case MSR_IA32_VMX_EPT_VPID_CAP:
-		/* Currently, no nested vpid support */
-		*pdata = vmx->nested.nested_vmx_ept_caps;
+		*pdata = vmx->nested.nested_vmx_ept_vpid_caps;
 		break;
 	default:
 		return 1;
@@ -7152,7 +7155,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 
 	if (!(vmx->nested.nested_vmx_secondary_ctls_high &
 	      SECONDARY_EXEC_ENABLE_EPT) ||
-	    !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
+	    !(vmx->nested.nested_vmx_ept_vpid_caps & VMX_EPT_INVEPT_BIT)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -7168,7 +7171,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
-	types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
+	types = (vmx->nested.nested_vmx_ept_vpid_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
 
 	if (!(types & (1UL << type))) {
 		nested_vmx_failValid(vcpu,
@@ -7207,14 +7210,15 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 static int handle_invvpid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 vmx_instruction_info;
+	u32 vmx_instruction_info, types;
 	unsigned long type;
 	gva_t gva;
 	struct x86_exception e;
 	int vpid;
 
 	if (!(vmx->nested.nested_vmx_secondary_ctls_high &
-	      SECONDARY_EXEC_ENABLE_VPID)) {
+	      SECONDARY_EXEC_ENABLE_VPID) ||
+	    !(vmx->nested.nested_vmx_ept_vpid_caps & (VMX_VPID_INVVPID_BIT << 32))) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -7225,6 +7229,14 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
+	types = (vmx->nested.nested_vmx_ept_vpid_caps >> VMX_VPID_EXTENT_SHIFT) & 0x7;
+
+	if (!(types & (1UL << type))) {
+		nested_vmx_failValid(vcpu,
+			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+		return 1;
+	}
+
 	/* according to the intel vmx instruction reference, the memory
 	 * operand is read even if it isn't needed (e.g., for type==global)
 	 */
@@ -8798,7 +8810,7 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 {
 	WARN_ON(mmu_is_nested(vcpu));
 	kvm_init_shadow_ept_mmu(vcpu,
-			to_vmx(vcpu)->nested.nested_vmx_ept_caps &
+			to_vmx(vcpu)->nested.nested_vmx_ept_vpid_caps &
 			VMX_EPT_EXECUTE_ONLY_BIT);
 	vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
 	vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
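
[Illustration only, not part of the patch: a rough sketch of the global
(all-context) INVVPID that an L1 hypervisor can issue once the capability
above is exposed. The descriptor layout and the all-context type value (2)
follow the Intel SDM; the raw opcode mirrors the "invvpid (%rax), %rcx"
encoding used by vmx.c's own __invvpid() helper, and the function name
l1_invvpid_all_context() is made up for the example.]

/*
 * Sketch only: flush all VPID-tagged TLB entries from L1 by issuing a
 * global (all-context) INVVPID.  Error reporting via the VMX status
 * flags is omitted for brevity.
 */
static inline void l1_invvpid_all_context(void)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { 0, 0, 0 };
	unsigned long type = 2;	/* all-context (global) invalidation */

	/* invvpid (%rax), %rcx: descriptor in memory, type in a register */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
		      : : "a"(&operand), "c"(type) : "cc", "memory");
}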