From patchwork Wed Jan 28 15:54:05 2015
X-Patchwork-Submitter: Wincy Van
X-Patchwork-Id: 5737251
From: Wincy Van
Date: Wed, 28 Jan 2015 23:54:05 +0800
Subject: [PATCH v4 1/6] KVM: nVMX: Use hardware MSR bitmap
To: Paolo Bonzini, gleb@kernel.org, "Zhang, Yang Z"
Cc: linux-kernel@vger.kernel.org, kvm@vger.kernel.org, Wanpeng Li,
    Jan Kiszka, 范文一
X-Mailing-List: kvm@vger.kernel.org

Currently, if L1 enables MSR_BITMAP, we emulate this feature: all of
L2's MSR accesses are intercepted by L0.  Since many features, such as
"virtualize x2APIC mode", have complicated logic that is difficult for
us to emulate, we should use the hardware MSR bitmap and merge L0's and
L1's bitmaps instead.

This patch introduces nested_vmx_merge_msr_bitmap for future use.
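To illustrate what "merging" means here (this sketch is not part of the
patch, and merge_msr_bitmaps is a hypothetical name): an MSR access must
still exit to L0 if either L0 or L1 wants to intercept it, so the merged
bitmap is simply the bitwise OR of the two page-sized bitmaps:

#include <stddef.h>
#include <stdint.h>

#define MSR_BITMAP_SIZE 4096	/* a VMX MSR bitmap occupies one 4 KiB page */

/*
 * Hypothetical user-space illustration only: intercept an MSR whenever
 * either L0's or L1's bitmap intercepts it, i.e. OR the two bitmaps.
 * The real nested_vmx_merge_msr_bitmap() added by this patch is only a
 * stub for now (it returns false, so the emulated path is kept).
 */
static void merge_msr_bitmaps(uint8_t *merged, const uint8_t *l0_bitmap,
			      const uint8_t *l1_bitmap)
{
	size_t i;

	for (i = 0; i < MSR_BITMAP_SIZE; i++)
		merged[i] = l0_bitmap[i] | l1_bitmap[i];
}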
Signed-off-by: Wincy Van
---
 arch/x86/kvm/vmx.c |   77 ++++++++++++++++++++++++++++++++++++++++++++-------
 1 files changed, 66 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c987374..787f886 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -798,6 +798,7 @@
 static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
+static unsigned long *vmx_msr_bitmap_nested;
 static unsigned long *vmx_vmread_bitmap;
 static unsigned long *vmx_vmwrite_bitmap;
@@ -5812,13 +5813,21 @@ static __init int hardware_setup(void)
 				(unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_msr_bitmap_longmode_x2apic)
 		goto out4;
+
+	if (nested) {
+		vmx_msr_bitmap_nested =
+			(unsigned long *)__get_free_page(GFP_KERNEL);
+		if (!vmx_msr_bitmap_nested)
+			goto out5;
+	}
+
 	vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_vmread_bitmap)
-		goto out5;
+		goto out6;
 
 	vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_vmwrite_bitmap)
-		goto out6;
+		goto out7;
 
 	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
 	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
@@ -5834,10 +5843,12 @@ static __init int hardware_setup(void)
 
 	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
 	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
+	if (nested)
+		memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
 
 	if (setup_vmcs_config(&vmcs_config) < 0) {
 		r = -EIO;
-		goto out7;
+		goto out8;
 	}
 
 	if (boot_cpu_has(X86_FEATURE_NX))
@@ -5944,10 +5955,13 @@ static __init int hardware_setup(void)
 
 	return alloc_kvm_area();
 
-out7:
+out8:
 	free_page((unsigned long)vmx_vmwrite_bitmap);
-out6:
+out7:
 	free_page((unsigned long)vmx_vmread_bitmap);
+out6:
+	if (nested)
+		free_page((unsigned long)vmx_msr_bitmap_nested);
 out5:
 	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
 out4:
@@ -5974,6 +5988,8 @@ static __exit void hardware_unsetup(void)
 	free_page((unsigned long)vmx_io_bitmap_a);
 	free_page((unsigned long)vmx_vmwrite_bitmap);
 	free_page((unsigned long)vmx_vmread_bitmap);
+	if (nested)
+		free_page((unsigned long)vmx_msr_bitmap_nested);
 
 	free_kvm_area();
 }
@@ -8305,6 +8321,38 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
 		      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }
 
+static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
+						struct vmcs12 *vmcs12)
+{
+	int maxphyaddr;
+	u64 addr;
+
+	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
+		return 0;
+
+	if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	maxphyaddr = cpuid_maxphyaddr(vcpu);
+
+	if (!PAGE_ALIGNED(vmcs12->msr_bitmap) ||
+	    ((addr + PAGE_SIZE) >> maxphyaddr))
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Merge L0's and L1's MSR bitmap, return false to indicate that
+ * we do not use the hardware.
+ */
+static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
+					       struct vmcs12 *vmcs12)
+{
+	return false;
+}
+
 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 				       unsigned long count_field,
 				       unsigned long addr_field,
@@ -8637,11 +8685,17 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 			vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
 	}
 
+	if (cpu_has_vmx_msr_bitmap() &&
+	    exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+	    nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) {
+		vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested));
+	} else
+		exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
+
 	/*
-	 * Merging of IO and MSR bitmaps not currently supported.
+	 * Merging of IO bitmap not currently supported.
 	 * Rather, exit every time.
 	 */
-	exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
 	exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
 	exec_control |= CPU_BASED_UNCOND_IO_EXITING;
 
@@ -8792,15 +8846,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
-	if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
-			!PAGE_ALIGNED(vmcs12->msr_bitmap)) {
+	if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
 		/*TODO: Also verify bits beyond physical address width are 0*/
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
 	}
 
-	if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
-		/*TODO: Also verify bits beyond physical address width are 0*/
+	if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
 	}
@@ -9356,6 +9408,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	kvm_set_dr(vcpu, 7, 0x400);
 	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
 
+	if (cpu_has_vmx_msr_bitmap())
+		vmx_set_msr_bitmap(vcpu);
+
 	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
 				vmcs12->vm_exit_msr_load_count))
 		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
-- 
1.7.1

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html