From patchwork Tue Nov 19 08:49:42 2019
X-Patchwork-Submitter: "Yang, Weijiang"
X-Patchwork-Id: 11251565
From: Yang Weijiang
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, pbonzini@redhat.com,
	jmattson@google.com, sean.j.christopherson@intel.com
Cc: yu.c.zhang@linux.intel.com, alazar@bitdefender.com, edwin.zhai@intel.com,
	Yang Weijiang
Subject: [PATCH v7 2/9] vmx: spp: Add control flags for Sub-Page Protection(SPP)
Date: Tue, 19 Nov 2019 16:49:42 +0800
Message-Id: <20191119084949.15471-3-weijiang.yang@intel.com>
In-Reply-To: <20191119084949.15471-1-weijiang.yang@intel.com>
References: <20191119084949.15471-1-weijiang.yang@intel.com>
List-ID: X-Mailing-List: kvm@vger.kernel.org

Check the SPP capability in MSR_IA32_VMX_PROCBASED_CTLS2; bit 23
indicates whether SPP is supported. If it is, set the SPP feature bit
in the CPU capabilities bitmap.
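Not part of the patch itself: a minimal sketch of the detection described
above, assuming the X86_VMX_FEATURE_PROC_CTLS2_SPP constant that the diff
below adds to intel.c. It folds the two halves of the MSR together the same
way detect_vmx_virtcap() does and then tests bit 23; the helper name is
hypothetical.

/*
 * Hypothetical helper for illustration; the real logic is added to
 * detect_vmx_virtcap() in arch/x86/kernel/cpu/intel.c below.
 * SPP is advertised by bit 23 in the allowed-1 (high) half of
 * MSR_IA32_VMX_PROCBASED_CTLS2.
 */
static bool cpu_supports_spp(void)
{
	u32 vmx_msr_low, vmx_msr_high;

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, vmx_msr_low, vmx_msr_high);
	return (vmx_msr_high | vmx_msr_low) & X86_VMX_FEATURE_PROC_CTLS2_SPP;
}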
Co-developed-by: He Chen
Signed-off-by: He Chen
Co-developed-by: Zhang Yi
Signed-off-by: Zhang Yi
Signed-off-by: Yang Weijiang
---
 arch/x86/include/asm/cpufeatures.h | 1 +
 arch/x86/include/asm/vmx.h         | 1 +
 arch/x86/kernel/cpu/intel.c        | 4 ++++
 arch/x86/kvm/mmu.h                 | 2 ++
 arch/x86/kvm/vmx/capabilities.h    | 5 +++++
 arch/x86/kvm/vmx/vmx.c             | 9 +++++++++
 6 files changed, 22 insertions(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index e880f2408e29..ee2c76fdadf6 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -228,6 +228,7 @@
 #define X86_FEATURE_FLEXPRIORITY	( 8*32+ 2) /* Intel FlexPriority */
 #define X86_FEATURE_EPT			( 8*32+ 3) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID		( 8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_SPP			( 8*32+ 5) /* Intel EPT-based Sub-Page Write Protection */
 
 #define X86_FEATURE_VMMCALL		( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV		( 8*32+16) /* "" Xen paravirtual guest */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index a39136b0d509..e1137807affc 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -68,6 +68,7 @@
 #define SECONDARY_EXEC_XSAVES			0x00100000
 #define SECONDARY_EXEC_PT_USE_GPA		0x01000000
 #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC	0x00400000
+#define SECONDARY_EXEC_ENABLE_SPP		0x00800000
 #define SECONDARY_EXEC_TSC_SCALING		0x02000000
 
 #define PIN_BASED_EXT_INTR_MASK			0x00000001
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8d6d92ebeb54..27617e522f01 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -503,6 +503,7 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 #define X86_VMX_FEATURE_PROC_CTLS2_EPT	0x00000002
 #define X86_VMX_FEATURE_PROC_CTLS2_VPID	0x00000020
 #define x86_VMX_FEATURE_EPT_CAP_AD	0x00200000
+#define X86_VMX_FEATURE_PROC_CTLS2_SPP	0x00800000
 
 	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
 	u32 msr_vpid_cap, msr_ept_cap;
@@ -513,6 +514,7 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 	clear_cpu_cap(c, X86_FEATURE_EPT);
 	clear_cpu_cap(c, X86_FEATURE_VPID);
 	clear_cpu_cap(c, X86_FEATURE_EPT_AD);
+	clear_cpu_cap(c, X86_FEATURE_SPP);
 
 	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
 	msr_ctl = vmx_msr_high | vmx_msr_low;
@@ -536,6 +538,8 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 		}
 		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
 			set_cpu_cap(c, X86_FEATURE_VPID);
+		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_SPP)
+			set_cpu_cap(c, X86_FEATURE_SPP);
 	}
 }
 
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 54c2a377795b..3c1423526a98 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -26,6 +26,8 @@
 #define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
 #define PT_PAT_MASK (1ULL << 7)
 #define PT_GLOBAL_MASK (1ULL << 8)
+#define PT_SPP_SHIFT 61
+#define PT_SPP_MASK (1ULL << PT_SPP_SHIFT)
 #define PT64_NX_SHIFT 63
 #define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
 
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index d6664ee3d127..e3bde7a32123 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -241,6 +241,11 @@ static inline bool cpu_has_vmx_pml(void)
 	return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
 }
 
+static inline bool cpu_has_vmx_ept_spp(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_SPP;
+}
+
 static inline bool vmx_xsaves_supported(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c030c96fc81a..0fcd02b531ef 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -60,6 +60,7 @@
 #include "vmcs12.h"
 #include "vmx.h"
 #include "x86.h"
+#include "spp.h"
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
@@ -113,6 +114,7 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
 static bool __read_mostly dump_invalid_vmcs = 0;
 module_param(dump_invalid_vmcs, bool, 0644);
 
+static bool __read_mostly spp_supported = 0;
 #define MSR_BITMAP_MODE_X2APIC		1
 #define MSR_BITMAP_MODE_X2APIC_APICV	2
 
@@ -2279,6 +2281,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 			SECONDARY_EXEC_RDSEED_EXITING |
 			SECONDARY_EXEC_RDRAND_EXITING |
 			SECONDARY_EXEC_ENABLE_PML |
+			SECONDARY_EXEC_ENABLE_SPP |
 			SECONDARY_EXEC_TSC_SCALING |
 			SECONDARY_EXEC_PT_USE_GPA |
 			SECONDARY_EXEC_PT_CONCEAL_VMX |
@@ -3931,6 +3934,9 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
 	if (!enable_pml)
 		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
 
+	if (!spp_supported)
+		exec_control &= ~SECONDARY_EXEC_ENABLE_SPP;
+
 	if (vmx_xsaves_supported()) {
 		/* Exposing XSAVES only when XSAVE is exposed */
 		bool xsaves_enabled =
@@ -7521,6 +7527,9 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_flexpriority())
 		flexpriority_enabled = 0;
 
+	if (cpu_has_vmx_ept_spp() && enable_ept)
+		spp_supported = 1;
+
 	if (!cpu_has_virtual_nmis())
 		enable_vnmi = 0;
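For context only, not part of this diff: the PT_SPP_MASK bit (bit 61) added
to mmu.h above is the bit that later patches in the series are expected to
set and test on EPT leaf entries. A hypothetical predicate over such an
entry, shown purely as a sketch:

/*
 * Hypothetical, illustrative only: bit 61 (PT_SPP_MASK) in an EPT leaf
 * entry marks the page as subject to sub-page write permissions.
 */
static inline bool is_spp_protected(u64 spte)
{
	return !!(spte & PT_SPP_MASK);
}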