From patchwork Sat May 9 11:05:35 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xiaoyao Li X-Patchwork-Id: 11537905 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 6545B15E6 for ; Sat, 9 May 2020 03:03:46 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 57AE120731 for ; Sat, 9 May 2020 03:03:46 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728716AbgEIDDm (ORCPT ); Fri, 8 May 2020 23:03:42 -0400 Received: from mga12.intel.com ([192.55.52.136]:55091 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728717AbgEIDDl (ORCPT ); Fri, 8 May 2020 23:03:41 -0400 IronPort-SDR: 6IdfPla3FRkc4FPFOD0QtnxxSJl30OeBEwGedyHKryGypgKS7La4MDhVSGEIJPIYEX+YdIbF/R xWktQxW1ieQg== X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 08 May 2020 20:03:41 -0700 IronPort-SDR: Te3FU+RtqOez0or2hntj7545tNBrvWOGgVG24krYgjWFaGqy7B0EK7TZtJO51Z7r9x5oijbjOI DyiahqZURsTg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.73,370,1583222400"; d="scan'208";a="408310990" Received: from lxy-dell.sh.intel.com ([10.239.159.21]) by orsmga004.jf.intel.com with ESMTP; 08 May 2020 20:03:35 -0700 From: Xiaoyao Li To: Paolo Bonzini , Thomas Gleixner , Sean Christopherson , kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org, x86@kernel.org, Ingo Molnar , Borislav Petkov , "H. 
Peter Anvin" , Andy Lutomirski , peterz@infradead.org, Arvind Sankar , Tony Luck , Fenghua Yu , Xiaoyao Li Subject: [PATCH v9 1/8] x86/split_lock: Rename TIF_SLD to TIF_SLD_DISABLED Date: Sat, 9 May 2020 19:05:35 +0800 Message-Id: <20200509110542.8159-2-xiaoyao.li@intel.com> X-Mailer: git-send-email 2.18.2 In-Reply-To: <20200509110542.8159-1-xiaoyao.li@intel.com> References: <20200509110542.8159-1-xiaoyao.li@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org TIF_SLD can only be set if a user space thread hits split lock and sld_state == sld_warn. This flag is set to indicate SLD (split lock detection) is turned off for the thread, so rename it to TIF_SLD_DISABLED, which is pretty self explaining. Suggested-by: Sean Christopherson Suggested-by: Thomas Gleixner Signed-off-by: Xiaoyao Li --- arch/x86/include/asm/thread_info.h | 6 +++--- arch/x86/kernel/cpu/intel.c | 6 +++--- arch/x86/kernel/process.c | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 8de8ceccb8bc..451a930de1c0 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -92,7 +92,7 @@ struct thread_info { #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* IA32 compatibility process */ -#define TIF_SLD 18 /* Restore split lock detection on context switch */ +#define TIF_SLD_DISABLED 18 /* split lock detection is turned off */ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ @@ -122,7 +122,7 @@ struct thread_info { #define _TIF_NOCPUID (1 << TIF_NOCPUID) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) -#define _TIF_SLD (1 << TIF_SLD) +#define _TIF_SLD_DISABLED (1 << 
TIF_SLD_DISABLED) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) @@ -141,7 +141,7 @@ struct thread_info { /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW_BASE \ (_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP | \ - _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE | _TIF_SLD) + _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE | _TIF_SLD_DISABLED) /* * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated. diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index a19a680542ce..0e6aee6ef1e8 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -1074,11 +1074,11 @@ static void split_lock_warn(unsigned long ip) /* * Disable the split lock detection for this task so it can make - * progress and set TIF_SLD so the detection is re-enabled via + * progress and set TIF_SLD_DISABLED so the detection is re-enabled via * switch_to_sld() when the task is scheduled out. */ sld_update_msr(false); - set_tsk_thread_flag(current, TIF_SLD); + set_tsk_thread_flag(current, TIF_SLD_DISABLED); } bool handle_guest_split_lock(unsigned long ip) @@ -1116,7 +1116,7 @@ bool handle_user_split_lock(struct pt_regs *regs, long error_code) */ void switch_to_sld(unsigned long tifn) { - sld_update_msr(!(tifn & _TIF_SLD)); + sld_update_msr(!(tifn & _TIF_SLD_DISABLED)); } /* diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 9da70b279dad..e7693a283489 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -650,7 +650,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) __speculation_ctrl_update(~tifn, tifn); } - if ((tifp ^ tifn) & _TIF_SLD) + if ((tifp ^ tifn) & _TIF_SLD_DISABLED) switch_to_sld(tifn); } From patchwork Sat May 9 11:05:36 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xiaoyao Li X-Patchwork-Id: 11537907 Return-Path: 
Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id D3B33139F for ; Sat, 9 May 2020 03:03:51 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id C80522495C for ; Sat, 9 May 2020 03:03:51 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728748AbgEIDDr (ORCPT ); Fri, 8 May 2020 23:03:47 -0400 Received: from mga12.intel.com ([192.55.52.136]:55091 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728739AbgEIDDq (ORCPT ); Fri, 8 May 2020 23:03:46 -0400 IronPort-SDR: /Q6YsntqP573q4KiBkWmK+RqpaxxOipyzClw/iojjnQMNor5tdxGqRxTpDkMTovl0pii1jESFA 4bwOWqHztB1g== X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 08 May 2020 20:03:46 -0700 IronPort-SDR: w81V+znp4HRxnQs0OiYVBqqj8WnqelGP6vV18yP9ihv1aue482DOxxu9XQnfYQ5G8r0MuljAVz Az3chD+dCYXA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.73,370,1583222400"; d="scan'208";a="408311025" Received: from lxy-dell.sh.intel.com ([10.239.159.21]) by orsmga004.jf.intel.com with ESMTP; 08 May 2020 20:03:40 -0700 From: Xiaoyao Li To: Paolo Bonzini , Thomas Gleixner , Sean Christopherson , kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org, x86@kernel.org, Ingo Molnar , Borislav Petkov , "H. 
Peter Anvin" , Andy Lutomirski , peterz@infradead.org, Arvind Sankar , Tony Luck , Fenghua Yu , Xiaoyao Li Subject: [PATCH v9 2/8] x86/split_lock: Remove bogus case in handle_guest_split_lock() Date: Sat, 9 May 2020 19:05:36 +0800 Message-Id: <20200509110542.8159-3-xiaoyao.li@intel.com> X-Mailer: git-send-email 2.18.2 In-Reply-To: <20200509110542.8159-1-xiaoyao.li@intel.com> References: <20200509110542.8159-1-xiaoyao.li@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org The bogus case can never happen, i.e., when sld_state == sld_off, guest won't trigger split lock #AC and of course no handle_guest_split_lock() will be called. Besides, dropping the bogus case also makes a future patch easier to remove sld_state if we reach the alignment that it must be sld_warn or sld_fatal when handle_guest_split_lock() is called. Signed-off-by: Xiaoyao Li --- The alternative would be to remove the "SLD enabled" check from KVM so that a truly unexpected/bogus #AC would generate a warn. It's not clear whether or not calling handle_guest_split_lock() iff SLD is enabled was intended in the long term. --- arch/x86/kernel/cpu/intel.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 0e6aee6ef1e8..4602dac14dcb 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -1088,9 +1088,8 @@ bool handle_guest_split_lock(unsigned long ip) return true; } - pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n", - current->comm, current->pid, - sld_state == sld_fatal ? 
"fatal" : "bogus", ip); + pr_warn_once("#AC: %s/%d fatal split_lock trap at address: 0x%lx\n", + current->comm, current->pid, ip); current->thread.error_code = 0; current->thread.trap_nr = X86_TRAP_AC; From patchwork Sat May 9 11:05:37 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xiaoyao Li X-Patchwork-Id: 11537919 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id C5D73139F for ; Sat, 9 May 2020 03:04:21 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id B91E4218AC for ; Sat, 9 May 2020 03:04:21 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728737AbgEIDDx (ORCPT ); Fri, 8 May 2020 23:03:53 -0400 Received: from mga12.intel.com ([192.55.52.136]:55091 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728752AbgEIDDw (ORCPT ); Fri, 8 May 2020 23:03:52 -0400 IronPort-SDR: zL+JbQmzfdTfKk8nn3Ddi3jL6g9MXXYeVvnQbPQpr47Icd6XmYI0Ip5CziBlIA24iYJRpZGGmZ KJ7hoTfNFxTA== X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 08 May 2020 20:03:48 -0700 IronPort-SDR: 8OhJwXpbyoeBle1+jQMFkSwIHy6Qoo5RYA1KJuUli6gd+rwJQNqP2nglitEJHCVI0wXGjDiCOf BaWfbn/wzMcA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.73,370,1583222400"; d="scan'208";a="408311046" Received: from lxy-dell.sh.intel.com ([10.239.159.21]) by orsmga004.jf.intel.com with ESMTP; 08 May 2020 20:03:44 -0700 From: Xiaoyao Li To: Paolo Bonzini , Thomas Gleixner , Sean Christopherson , kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org, x86@kernel.org, Ingo Molnar , Borislav Petkov , "H. 
Peter Anvin" , Andy Lutomirski , peterz@infradead.org, Arvind Sankar , Tony Luck , Fenghua Yu , Xiaoyao Li Subject: [PATCH v9 3/8] x86/split_lock: Introduce flag X86_FEATURE_SLD_FATAL and drop sld_state Date: Sat, 9 May 2020 19:05:37 +0800 Message-Id: <20200509110542.8159-4-xiaoyao.li@intel.com> X-Mailer: git-send-email 2.18.2 In-Reply-To: <20200509110542.8159-1-xiaoyao.li@intel.com> References: <20200509110542.8159-1-xiaoyao.li@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org Introduce a synthetic feature flag X86_FEATURE_SLD_FATAL, which means kernel is in sld_fatal mode if set. Now sld_state is not needed any more that the state of SLD can be inferred from X86_FEATURE_SPLIT_LOCK_DETECT and X86_FEATURE_SLD_FATAL. Suggested-by: Sean Christopherson Signed-off-by: Xiaoyao Li --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kernel/cpu/intel.c | 16 ++++++---------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index db189945e9b0..260adfc6c61a 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -286,6 +286,7 @@ #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */ +#define X86_FEATURE_SLD_FATAL (11*32+ 7) /* split lock detection in fatal mode */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 4602dac14dcb..93b8ccf2fa11 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -40,12 +40,6 @@ enum split_lock_detect_state { sld_fatal, }; -/* - * Default to sld_off because most 
systems do not support split lock detection - * split_lock_setup() will switch this to sld_warn on systems that support - * split lock detect, unless there is a command line override. - */ -static enum split_lock_detect_state sld_state __ro_after_init = sld_off; static u64 msr_test_ctrl_cache __ro_after_init; /* @@ -1043,8 +1037,9 @@ static void __init split_lock_setup(void) return; } - sld_state = state; setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); + if (state == sld_fatal) + setup_force_cpu_cap(X86_FEATURE_SLD_FATAL); } /* @@ -1064,7 +1059,7 @@ static void sld_update_msr(bool on) static void split_lock_init(void) { - split_lock_verify_msr(sld_state != sld_off); + split_lock_verify_msr(boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)); } static void split_lock_warn(unsigned long ip) @@ -1083,7 +1078,7 @@ static void split_lock_warn(unsigned long ip) bool handle_guest_split_lock(unsigned long ip) { - if (sld_state == sld_warn) { + if (!boot_cpu_has(X86_FEATURE_SLD_FATAL)) { split_lock_warn(ip); return true; } @@ -1100,7 +1095,8 @@ EXPORT_SYMBOL_GPL(handle_guest_split_lock); bool handle_user_split_lock(struct pt_regs *regs, long error_code) { - if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal) + if ((regs->flags & X86_EFLAGS_AC) || + boot_cpu_has(X86_FEATURE_SLD_FATAL)) return false; split_lock_warn(regs->ip); return true; From patchwork Sat May 9 11:05:38 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xiaoyao Li X-Patchwork-Id: 11537909 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 84B4F15E6 for ; Sat, 9 May 2020 03:03:59 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 77CEA24965 for ; Sat, 9 May 2020 03:03:59 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand 
id S1728768AbgEIDDy (ORCPT ); Fri, 8 May 2020 23:03:54 -0400 Received: from mga12.intel.com ([192.55.52.136]:55120 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728692AbgEIDDw (ORCPT ); Fri, 8 May 2020 23:03:52 -0400 IronPort-SDR: U9l+Iq81pNEVyB5k8csACETrXBqnYFWSbA+sWzf9+YzS5IgrP7HZzGLUYgjeJnB+xSaxyicstU 2UBgfPzbZ3TQ== X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 08 May 2020 20:03:52 -0700 IronPort-SDR: XnVfzQjeJ2drVnSfFyargOrUUqXVWmNQRzk+Vucny6ZydagwnsP8bRWSVS+Ilnsfg1/MU8DSz3 qolzq8oKEq7Q== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.73,370,1583222400"; d="scan'208";a="408311062" Received: from lxy-dell.sh.intel.com ([10.239.159.21]) by orsmga004.jf.intel.com with ESMTP; 08 May 2020 20:03:48 -0700 From: Xiaoyao Li To: Paolo Bonzini , Thomas Gleixner , Sean Christopherson , kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org, x86@kernel.org, Ingo Molnar , Borislav Petkov , "H. Peter Anvin" , Andy Lutomirski , peterz@infradead.org, Arvind Sankar , Tony Luck , Fenghua Yu , Xiaoyao Li Subject: [PATCH v9 4/8] x86/split_lock: Introduce split_lock_virt_switch() and two wrappers Date: Sat, 9 May 2020 19:05:38 +0800 Message-Id: <20200509110542.8159-5-xiaoyao.li@intel.com> X-Mailer: git-send-email 2.18.2 In-Reply-To: <20200509110542.8159-1-xiaoyao.li@intel.com> References: <20200509110542.8159-1-xiaoyao.li@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org Introduce split_lock_virt_switch(), which is used for toggling split lock detection setting as well as updating TIF_SLD_DISABLED flag to make them consistent. Note, it can only be used in sld warn mode, i.e., X86_FEATURE_SPLIT_LOCK_DETECT && !X86_FEATURE_SLD_FATAL. 
The FATAL check is handled by wrappers, split_lock_set_guest() and split_lock_restore_host(), that will be used by KVM when virtualizing split lock detection for guest in the future. Signed-off-by: Xiaoyao Li --- arch/x86/include/asm/cpu.h | 33 +++++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/intel.c | 20 ++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index dd17c2da1af5..a57f00f1d5b5 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -45,6 +45,7 @@ extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c); extern void switch_to_sld(unsigned long tifn); extern bool handle_user_split_lock(struct pt_regs *regs, long error_code); extern bool handle_guest_split_lock(unsigned long ip); +extern bool split_lock_virt_switch(bool on); #else static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {} static inline void switch_to_sld(unsigned long tifn) {} @@ -57,5 +58,37 @@ static inline bool handle_guest_split_lock(unsigned long ip) { return false; } + +static inline bool split_lock_virt_switch(bool on) { return false; } #endif + +/** + * split_lock_set_guest - Set SLD state for a guest + * @guest_sld_on: If SLD is on in the guest + * + * returns: %true if SLD was enabled in the task + * + * Must be called when X86_FEATURE_SPLIT_LOCK_DETECT is available. + */ +static inline bool split_lock_set_guest(bool guest_sld_on) +{ + if (static_cpu_has(X86_FEATURE_SLD_FATAL)) + return true; + + return split_lock_virt_switch(guest_sld_on); +} + +/** + * split_lock_restore_host - Restore host SLD state + * @host_sld_on: If SLD is on in the host + * + * Must be called when X86_FEATURE_SPLIT_LOCK_DETECT is available. 
+ */ +static inline void split_lock_restore_host(bool host_sld_on) +{ + if (static_cpu_has(X86_FEATURE_SLD_FATAL)) + return; + + split_lock_virt_switch(host_sld_on); +} #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 93b8ccf2fa11..1e2a74e8c592 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -1062,6 +1062,26 @@ static void split_lock_init(void) split_lock_verify_msr(boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)); } +/* + * It should never be called directly but should use split_lock_set_guest() + * and split_lock_restore_host() instead. + * + * The caller needs to be in preemption disabled context to ensure + * MSR state and TIF_SLD_DISABLED state consistent. + */ +bool split_lock_virt_switch(bool on) +{ + bool was_on = !test_thread_flag(TIF_SLD_DISABLED); + + if (on != was_on) { + sld_update_msr(on); + update_thread_flag(TIF_SLD_DISABLED, !on); + } + + return was_on; +} +EXPORT_SYMBOL_GPL(split_lock_virt_switch); + static void split_lock_warn(unsigned long ip) { pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n", From patchwork Sat May 9 11:05:39 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xiaoyao Li X-Patchwork-Id: 11537911 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 7661015E6 for ; Sat, 9 May 2020 03:04:03 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 6949724969 for ; Sat, 9 May 2020 03:04:03 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728787AbgEIDD7 (ORCPT ); Fri, 8 May 2020 23:03:59 -0400 Received: from mga12.intel.com ([192.55.52.136]:55127 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728718AbgEIDD5 (ORCPT 
); Fri, 8 May 2020 23:03:57 -0400 IronPort-SDR: 6vXjjzZfwAWo6AH0Zqhyntqwqy38ZTfpXgpPOh61vsZj708UkI2iyIAT4nAgqo6rBDxL+P+SIc dMwYBstv8ScQ== X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 08 May 2020 20:03:57 -0700 IronPort-SDR: IH3LtByqkiLmCjjX1nZJz2E2JCjchxKQZ7+/3LW8VOr8UgyRDKzILsAY+XQbuB5s3F1wZa3rVS bjGjBUsEVnWw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.73,370,1583222400"; d="scan'208";a="408311074" Received: from lxy-dell.sh.intel.com ([10.239.159.21]) by orsmga004.jf.intel.com with ESMTP; 08 May 2020 20:03:52 -0700 From: Xiaoyao Li To: Paolo Bonzini , Thomas Gleixner , Sean Christopherson , kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org, x86@kernel.org, Ingo Molnar , Borislav Petkov , "H. Peter Anvin" , Andy Lutomirski , peterz@infradead.org, Arvind Sankar , Tony Luck , Fenghua Yu , Xiaoyao Li Subject: [PATCH v9 5/8] x86/kvm: Introduce paravirt split lock detection enumeration Date: Sat, 9 May 2020 19:05:39 +0800 Message-Id: <20200509110542.8159-6-xiaoyao.li@intel.com> X-Mailer: git-send-email 2.18.2 In-Reply-To: <20200509110542.8159-1-xiaoyao.li@intel.com> References: <20200509110542.8159-1-xiaoyao.li@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org Introduce KVM_FEATURE_SPLIT_LOCK_DETECT, for which linux guest running on KVM can enumerate the availability of feature split lock detection. Introduce KVM_HINTS_SLD_FATAL, which tells whether host is in sld_fatal mode, i.e., whether split lock detection is forced on for guest vcpu. 
Signed-off-by: Xiaoyao Li --- Documentation/virt/kvm/cpuid.rst | 29 ++++++++++++++++++++-------- arch/x86/include/uapi/asm/kvm_para.h | 8 +++++--- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst index 01b081f6e7ea..a7e85ac090a8 100644 --- a/Documentation/virt/kvm/cpuid.rst +++ b/Documentation/virt/kvm/cpuid.rst @@ -86,6 +86,12 @@ KVM_FEATURE_PV_SCHED_YIELD 13 guest checks this feature bit before using paravirtualized sched yield. +KVM_FEATURE_SPLIT_LOCK_DETECT 14 guest checks this feature bit for + available of split lock detection. + + KVM doesn't support enumerating + split lock detection via CPU model + KVM_FEATURE_CLOCSOURCE_STABLE_BIT 24 host will warn if no guest-side per-cpu warps are expeced in kvmclock @@ -97,11 +103,18 @@ KVM_FEATURE_CLOCSOURCE_STABLE_BIT 24 host will warn if no guest-side Where ``flag`` here is defined as below: -================== ============ ================================= -flag value meaning -================== ============ ================================= -KVM_HINTS_REALTIME 0 guest checks this feature bit to - determine that vCPUs are never - preempted for an unlimited time - allowing optimizations -================== ============ ================================= +================================ ============ ================================= +flag value meaning +================================ ============ ================================= +KVM_HINTS_REALTIME 0 guest checks this feature bit to + determine that vCPUs are never + preempted for an unlimited time + allowing optimizations + +KVM_HINTS_SLD_FATAL 1 set if split lock detection is + forced on in the host, in which + case KVM will kill the guest if it + generates a split lock #AC with + SLD disabled from guest's + perspective +================================ ============ ================================= diff --git a/arch/x86/include/uapi/asm/kvm_para.h 
b/arch/x86/include/uapi/asm/kvm_para.h index 2a8e0b6b9805..a8fe0221403a 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h @@ -31,14 +31,16 @@ #define KVM_FEATURE_PV_SEND_IPI 11 #define KVM_FEATURE_POLL_CONTROL 12 #define KVM_FEATURE_PV_SCHED_YIELD 13 - -#define KVM_HINTS_REALTIME 0 - +#define KVM_FEATURE_SPLIT_LOCK_DETECT 14 /* The last 8 bits are used to indicate how to interpret the flags field * in pvclock structure. If no bits are set, all flags are ignored. */ #define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 +/* KVM feature hints in CPUID.0x40000001.EDX */ +#define KVM_HINTS_REALTIME 0 +#define KVM_HINTS_SLD_FATAL 1 + #define MSR_KVM_WALL_CLOCK 0x11 #define MSR_KVM_SYSTEM_TIME 0x12 From patchwork Sat May 9 11:05:40 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xiaoyao Li X-Patchwork-Id: 11537913 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 60512139F for ; Sat, 9 May 2020 03:04:07 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 5408624965 for ; Sat, 9 May 2020 03:04:07 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728808AbgEIDED (ORCPT ); Fri, 8 May 2020 23:04:03 -0400 Received: from mga12.intel.com ([192.55.52.136]:55127 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728798AbgEIDED (ORCPT ); Fri, 8 May 2020 23:04:03 -0400 IronPort-SDR: zpxbVdBpw8e8kSWVRQWMknKA2/MVQoDuzvZ7GxmpbkGbgg86Mn4tpWicKhN/hF8i3ca0GJBQAX zl1IZtoCacdA== X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 08 May 2020 20:04:02 -0700 IronPort-SDR: 
nf6FiCRLs+sAUC5e2D9mMEaed8nZIYM7FPzF2WXVHDy4z1D8XnLWHYtOv0vetpW/06MEkCgmR0 ei9dnBUfJYCw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.73,370,1583222400"; d="scan'208";a="408311087" Received: from lxy-dell.sh.intel.com ([10.239.159.21]) by orsmga004.jf.intel.com with ESMTP; 08 May 2020 20:03:55 -0700 From: Xiaoyao Li To: Paolo Bonzini , Thomas Gleixner , Sean Christopherson , kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org, x86@kernel.org, Ingo Molnar , Borislav Petkov , "H. Peter Anvin" , Andy Lutomirski , peterz@infradead.org, Arvind Sankar , Tony Luck , Fenghua Yu , Xiaoyao Li Subject: [PATCH v9 6/8] KVM: VMX: Enable MSR TEST_CTRL for guest Date: Sat, 9 May 2020 19:05:40 +0800 Message-Id: <20200509110542.8159-7-xiaoyao.li@intel.com> X-Mailer: git-send-email 2.18.2 In-Reply-To: <20200509110542.8159-1-xiaoyao.li@intel.com> References: <20200509110542.8159-1-xiaoyao.li@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org Unconditionally allow the guest to read and zero-write MSR TEST_CTRL. This matches the fact that most Intel CPUs support MSR TEST_CTRL, and it also alleviates the effort to handle wrmsr/rdmsr when split lock detection is exposed to the guest in a future patch. 
Signed-off-by: Xiaoyao Li --- arch/x86/kvm/vmx/vmx.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c2c6335a998c..dbec38ad5035 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1789,6 +1789,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) u32 index; switch (msr_info->index) { + case MSR_TEST_CTRL: + msr_info->data = 0; + break; #ifdef CONFIG_X86_64 case MSR_FS_BASE: msr_info->data = vmcs_readl(GUEST_FS_BASE); @@ -1942,6 +1945,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) u32 index; switch (msr_index) { + case MSR_TEST_CTRL: + if (data) + return 1; + + break; case MSR_EFER: ret = kvm_set_msr_common(vcpu, msr_info); break; From patchwork Sat May 9 11:05:41 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xiaoyao Li X-Patchwork-Id: 11537917 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 2808715E6 for ; Sat, 9 May 2020 03:04:17 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 1533521655 for ; Sat, 9 May 2020 03:04:17 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728817AbgEIDEI (ORCPT ); Fri, 8 May 2020 23:04:08 -0400 Received: from mga12.intel.com ([192.55.52.136]:55134 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728802AbgEIDED (ORCPT ); Fri, 8 May 2020 23:04:03 -0400 IronPort-SDR: joa51eO9fAD6H/sThBE8+JHYjhT+Q8zTwb6iqbKmCSTzrQ9tamnXi/+V4P0TJAmK2EBPg6u28U 6t16k2Eho31Q== X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 08 May 2020 20:04:03 -0700 
IronPort-SDR: vZ6cOC3RE6+a2TMull6WB4mp3ijT1dgtXYiWb2SVWaHHNPzG6k4B14lnPs1PPieRlJYYn1gBkk ZvV3ByEm8ppw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.73,370,1583222400"; d="scan'208";a="408311104" Received: from lxy-dell.sh.intel.com ([10.239.159.21]) by orsmga004.jf.intel.com with ESMTP; 08 May 2020 20:03:59 -0700 From: Xiaoyao Li To: Paolo Bonzini , Thomas Gleixner , Sean Christopherson , kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org, x86@kernel.org, Ingo Molnar , Borislav Petkov , "H. Peter Anvin" , Andy Lutomirski , peterz@infradead.org, Arvind Sankar , Tony Luck , Fenghua Yu , Xiaoyao Li Subject: [PATCH v9 7/8] KVM: VMX: virtualize split lock detection Date: Sat, 9 May 2020 19:05:41 +0800 Message-Id: <20200509110542.8159-8-xiaoyao.li@intel.com> X-Mailer: git-send-email 2.18.2 In-Reply-To: <20200509110542.8159-1-xiaoyao.li@intel.com> References: <20200509110542.8159-1-xiaoyao.li@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org TEST_CTRL MSR is per-core scope, i.e., the sibling threads in the same physical core share the same MSR. This requires additional constraint when exposing it to guest. 1) When host SLD state is sld_off (no X86_FEATURE_SPLIT_LOCK_DETECT), feature split lock detection is unsupported/disabled. Cannot expose it to guest. 2) When host SLD state is sld_warn (has X86_FEATURE_SPLIT_LOCK_DETECT but no X86_FEATURE_SLD_FATAL), feature split lock detection can be exposed to guest only when nosmt due to the per-core scope. In this case, guest's setting can be propagated into real hardware MSR. Further, to avoid the potential MSR_TEST_CTRL.SLD toggling overhead during every vm-enter and vm-exit, it loads and keeps guest's SLD setting when in vcpu run loop and guest_state_loaded, i.e., between vmx_prepare_switch_to_guest() and vmx_prepare_switch_to_host(). 
3) when host SLD state is sld_fatal (has X86_FEATURE_SLD_FATAL), feature split lock detection can be exposed to guest regardless of SMT but KVM_HINTS_SLD_FATAL needs to be set. In this case, guest can still set and clear MSR_TEST_CTRL.SLD bit, but the bit value never be propagated to real MSR. KVM always keeps SLD bit turned on for guest vcpu. The reason why not force guest MSR_CTRL.SLD bit to 1 is that guest needs to set this bit to 1 itself to tell KVM it's SLD-aware. Signed-off-by: Xiaoyao Li --- arch/x86/kvm/cpuid.c | 6 ++++ arch/x86/kvm/vmx/vmx.c | 68 ++++++++++++++++++++++++++++++++++++------ arch/x86/kvm/vmx/vmx.h | 3 ++ arch/x86/kvm/x86.c | 6 +++- arch/x86/kvm/x86.h | 7 +++++ 5 files changed, 80 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 901cd1fdecd9..7d9f2daddaf3 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -717,9 +717,15 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) if (sched_info_on()) entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); + if (kvm_split_lock_detect_supported()) + entry->eax |= (1 << KVM_FEATURE_SPLIT_LOCK_DETECT); + entry->ebx = 0; entry->ecx = 0; entry->edx = 0; + + if (boot_cpu_has(X86_FEATURE_SLD_FATAL)) + entry->edx |= (1 << KVM_HINTS_SLD_FATAL); break; case 0x80000000: entry->eax = min(entry->eax, 0x8000001f); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index dbec38ad5035..1cc386c5801d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1120,6 +1120,29 @@ void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, } } +static inline u64 vmx_msr_test_ctrl_valid_bits(struct vcpu_vmx *vmx) +{ + u64 valid_bits = 0; + + if (vmx->guest_has_sld) + valid_bits |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; + + return valid_bits; +} + +static inline bool guest_sld_on(struct vcpu_vmx *vmx) +{ + return vmx->msr_test_ctrl & MSR_TEST_CTRL_SPLIT_LOCK_DETECT; +} + +static inline void vmx_update_guest_sld(struct 
vcpu_vmx *vmx) +{ + preempt_disable(); + if (vmx->guest_state_loaded) + split_lock_set_guest(guest_sld_on(vmx)); + preempt_enable(); +} + void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -1188,6 +1211,10 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) #endif vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base); + + if (static_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && vmx->guest_has_sld) + vmx->host_sld_on = split_lock_set_guest(guest_sld_on(vmx)); + vmx->guest_state_loaded = true; } @@ -1226,6 +1253,10 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif load_fixmap_gdt(raw_smp_processor_id()); + + if (static_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && vmx->guest_has_sld) + split_lock_restore_host(vmx->host_sld_on); + vmx->guest_state_loaded = false; vmx->guest_msrs_ready = false; } @@ -1790,7 +1821,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr_info->index) { case MSR_TEST_CTRL: - msr_info->data = 0; + msr_info->data = vmx->msr_test_ctrl; break; #ifdef CONFIG_X86_64 case MSR_FS_BASE: @@ -1946,9 +1977,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr_index) { case MSR_TEST_CTRL: - if (data) + if (data & ~vmx_msr_test_ctrl_valid_bits(vmx)) return 1; + vmx->msr_test_ctrl = data; + vmx_update_guest_sld(vmx); + break; case MSR_EFER: ret = kvm_set_msr_common(vcpu, msr_info); @@ -4266,7 +4300,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmx->rmode.vm86_active = 0; vmx->spec_ctrl = 0; - + vmx->msr_test_ctrl = 0; vmx->msr_ia32_umwait_control = 0; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); @@ -4596,24 +4630,32 @@ static int handle_machine_check(struct kvm_vcpu *vcpu) return 1; } +static inline bool guest_alignment_check_enabled(struct kvm_vcpu *vcpu) +{ + return vmx_get_cpl(vcpu) == 3 && 
kvm_read_cr0_bits(vcpu, X86_CR0_AM) && + (kvm_get_rflags(vcpu) & X86_EFLAGS_AC); +} + /* * If the host has split lock detection disabled, then #AC is * unconditionally injected into the guest, which is the pre split lock * detection behaviour. * * If the host has split lock detection enabled then #AC is - * only injected into the guest when: - * - Guest CPL == 3 (user mode) - * - Guest has #AC detection enabled in CR0 - * - Guest EFLAGS has AC bit set + * injected into the guest when: + * 1) guest has alignment check enabled; + * or 2) guest has split lock detection enabled; */ static inline bool guest_inject_ac(struct kvm_vcpu *vcpu) { if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) return true; - return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) && - (kvm_get_rflags(vcpu) & X86_EFLAGS_AC); + /* + * A split lock access must be an unaligned access, so we should check + * guest_alignment_check_enabled() first. + */ + return guest_alignment_check_enabled(vcpu) || guest_sld_on(to_vmx(vcpu)); } static int handle_exception_nmi(struct kvm_vcpu *vcpu) @@ -7109,6 +7151,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) static void vmx_cpuid_update(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + struct kvm_cpuid_entry2 *best; /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */ vcpu->arch.xsaves_enabled = false; @@ -7144,6 +7187,13 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) vmx_set_guest_msr(vmx, msr, enabled ?
0 : TSX_CTRL_RTM_DISABLE); } } + + vmx->guest_has_sld = false; + if (kvm_split_lock_detect_supported()) { + best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0); + if (best && (best->eax & 1 << KVM_FEATURE_SPLIT_LOCK_DETECT)) + vmx->guest_has_sld = true; + } } static __init void vmx_set_cpu_caps(void) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index aab9df55336e..aa58fb50081e 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -216,12 +216,15 @@ struct vcpu_vmx { int nmsrs; int save_nmsrs; bool guest_msrs_ready; + bool guest_has_sld; + bool host_sld_on; #ifdef CONFIG_X86_64 u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif u64 spec_ctrl; + u64 msr_test_ctrl; u32 msr_ia32_umwait_control; u32 secondary_exec_control; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c5835f9cb9ad..f04ed46c433e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1189,7 +1189,7 @@ static const u32 msrs_to_save_all[] = { #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, - MSR_IA32_SPEC_CTRL, + MSR_IA32_SPEC_CTRL, MSR_TEST_CTRL, MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, @@ -5239,6 +5239,10 @@ static void kvm_init_msr_list(void) * to the guests in some cases. 
*/ switch (msrs_to_save_all[i]) { + case MSR_TEST_CTRL: + if (!kvm_split_lock_detect_supported()) + continue; + break; case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported()) continue; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index b968acc0516f..346a7527c2c6 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -355,6 +355,13 @@ static inline bool kvm_dr7_valid(u64 data) return !(data >> 32); } +static inline bool kvm_split_lock_detect_supported(void) +{ + return boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && + (boot_cpu_has(X86_FEATURE_SLD_FATAL) || + !cpu_smt_possible()); +} + void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu); void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu); u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu); From patchwork Sat May 9 11:05:42 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xiaoyao Li X-Patchwork-Id: 11537915 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 0E25115E6 for ; Sat, 9 May 2020 03:04:14 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 01041218AC for ; Sat, 9 May 2020 03:04:14 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728833AbgEIDEK (ORCPT ); Fri, 8 May 2020 23:04:10 -0400 Received: from mga12.intel.com ([192.55.52.136]:55138 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728827AbgEIDEJ (ORCPT ); Fri, 8 May 2020 23:04:09 -0400 IronPort-SDR: ZKr9Wk68/Pl3/Opp8VOpwDalsIlqjiWG3+g9zheNQnvSXXrE+BQajGgkZgyavLXced71Q4d5rS M/iNyRzH9pIw== X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 08 May 2020 20:04:08 -0700 IronPort-SDR: 
o2QC/4o0beIrhW/7RJufTIdgMZFUc4WpQSPUDvwdSOz33dN//gpE/k1PcxXCvrgXQejN7GRWQ1 DUktOluMBc0g== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.73,370,1583222400"; d="scan'208";a="408311125" Received: from lxy-dell.sh.intel.com ([10.239.159.21]) by orsmga004.jf.intel.com with ESMTP; 08 May 2020 20:04:03 -0700 From: Xiaoyao Li To: Paolo Bonzini , Thomas Gleixner , Sean Christopherson , kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org, x86@kernel.org, Ingo Molnar , Borislav Petkov , "H. Peter Anvin" , Andy Lutomirski , peterz@infradead.org, Arvind Sankar , Tony Luck , Fenghua Yu , Xiaoyao Li Subject: [PATCH v9 8/8] x86/split_lock: Enable split lock detection initialization when running as a guest on KVM Date: Sat, 9 May 2020 19:05:42 +0800 Message-Id: <20200509110542.8159-9-xiaoyao.li@intel.com> X-Mailer: git-send-email 2.18.2 In-Reply-To: <20200509110542.8159-1-xiaoyao.li@intel.com> References: <20200509110542.8159-1-xiaoyao.li@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org When running as guest, enumerating feature split lock detection through CPU model is not easy since CPU model is configurable by host VMM. If running upon KVM, it can be enumerated through KVM_FEATURE_SPLIT_LOCK_DETECT, and if KVM_HINTS_SLD_FATAL is set, it needs to be set to sld_fatal mode.
Signed-off-by: Xiaoyao Li --- arch/x86/include/asm/cpu.h | 2 ++ arch/x86/kernel/cpu/intel.c | 12 ++++++++++-- arch/x86/kernel/kvm.c | 3 +++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index a57f00f1d5b5..5d5b488b4b45 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -42,12 +42,14 @@ unsigned int x86_model(unsigned int sig); unsigned int x86_stepping(unsigned int sig); #ifdef CONFIG_CPU_SUP_INTEL extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c); +extern void __init split_lock_setup(bool fatal); extern void switch_to_sld(unsigned long tifn); extern bool handle_user_split_lock(struct pt_regs *regs, long error_code); extern bool handle_guest_split_lock(unsigned long ip); extern bool split_lock_virt_switch(bool on); #else static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {} +static inline void __init split_lock_setup(bool fatal) {} static inline void switch_to_sld(unsigned long tifn) {} static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code) { diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 1e2a74e8c592..02e24134b9b5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -996,12 +996,18 @@ static bool split_lock_verify_msr(bool on) return ctrl == tmp; } -static void __init split_lock_setup(void) +void __init split_lock_setup(bool fatal) { enum split_lock_detect_state state = sld_warn; char arg[20]; int i, ret; + if (fatal) { + state = sld_fatal; + pr_info("forced on, sending SIGBUS on user-space split_locks\n"); + goto set_cap; + } + if (!split_lock_verify_msr(false)) { pr_info("MSR access failed: Disabled\n"); return; @@ -1037,6 +1043,7 @@ static void __init split_lock_setup(void) return; } +set_cap: setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); if (state == sld_fatal) setup_force_cpu_cap(X86_FEATURE_SLD_FATAL); @@ -1161,6 +1168,7 @@ void __init 
cpu_set_core_cap_bits(struct cpuinfo_x86 *c) const struct x86_cpu_id *m; u64 ia32_core_caps; + /* Note, paravirt support can enable SLD, e.g., see kvm_guest_init(). */ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return; @@ -1182,5 +1190,5 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) return; } - split_lock_setup(); + split_lock_setup(false); } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 6efe0410fb72..489ea89e2e8e 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -670,6 +670,9 @@ static void __init kvm_guest_init(void) * overcommitted. */ hardlockup_detector_disable(); + + if (kvm_para_has_feature(KVM_FEATURE_SPLIT_LOCK_DETECT)) + split_lock_setup(kvm_para_has_hint(KVM_HINTS_SLD_FATAL)); } static noinline uint32_t __kvm_cpuid_base(void)