
[v9,8/8] x86/split_lock: Enable split lock detection initialization when running as a guest on KVM

Message ID 20200509110542.8159-9-xiaoyao.li@intel.com (mailing list archive)
State New, archived
Series KVM: Add virtualization support of split lock detection

Commit Message

Xiaoyao Li May 9, 2020, 11:05 a.m. UTC
When running as a guest, enumerating the split lock detection feature
through the CPU model is not reliable, since the CPU model is
configurable by the host VMM.

If running on KVM, the feature can be enumerated through
KVM_FEATURE_SPLIT_LOCK_DETECT, and if KVM_HINTS_SLD_FATAL is set, the
guest must be put into sld_fatal mode.

Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
---
 arch/x86/include/asm/cpu.h  |  2 ++
 arch/x86/kernel/cpu/intel.c | 12 ++++++++++--
 arch/x86/kernel/kvm.c       |  3 +++
 3 files changed, 15 insertions(+), 2 deletions(-)
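
As a quick illustration of the flow this patch adds (kvm_guest_init() checks
the paravirt bits and hands the fatal hint to split_lock_setup()), here is a
minimal userspace sketch of the same decision logic. The two guest_has_*()
stand-ins below are hard-coded placeholders for kvm_para_has_feature() and
kvm_para_has_hint(), whose CPUID plumbing and bit definitions come from patch
5 of this series; treat this as an illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

enum split_lock_detect_state { sld_off, sld_warn, sld_fatal };

/* Hard-coded stand-ins for kvm_para_has_feature()/kvm_para_has_hint(). */
static bool guest_has_kvm_sld_feature(void)    { return true; }
static bool guest_has_kvm_sld_fatal_hint(void) { return false; }

/*
 * Mirrors the patched split_lock_setup(): a fatal hint forces sld_fatal;
 * otherwise the existing path runs (MSR probe plus split_lock_detect=
 * command-line parsing), defaulting to sld_warn.
 */
static enum split_lock_detect_state split_lock_setup_sketch(bool fatal)
{
	if (fatal)
		return sld_fatal;
	return sld_warn;
}

int main(void)
{
	enum split_lock_detect_state state = sld_off;

	/* Mirrors the new hunk in kvm_guest_init(). */
	if (guest_has_kvm_sld_feature())
		state = split_lock_setup_sketch(guest_has_kvm_sld_fatal_hint());

	printf("guest SLD mode: %s\n",
	       state == sld_fatal ? "fatal" :
	       state == sld_warn  ? "warn"  : "off");
	return 0;
}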

Comments

Andy Lutomirski May 10, 2020, 5:15 a.m. UTC | #1
On Fri, May 8, 2020 at 8:04 PM Xiaoyao Li <xiaoyao.li@intel.com> wrote:
>
> When running as a guest, enumerating the split lock detection feature
> through the CPU model is not reliable, since the CPU model is
> configurable by the host VMM.
>
> If running on KVM, the feature can be enumerated through
> KVM_FEATURE_SPLIT_LOCK_DETECT,

This needs crystal clear documentation.  What, exactly, is the host
telling the guest if it sets this flag?

> and if KVM_HINTS_SLD_FATAL is set, the
> guest must be put into sld_fatal mode.


This needs much better docs.  Do you mean:

"If KVM_HINTS_SLD_FATAL is set, then the guest will get #AC if it does
a split-lock regardless of what is written to MSR_TEST_CTRL?"


>
> Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
> ---
>  arch/x86/include/asm/cpu.h  |  2 ++
>  arch/x86/kernel/cpu/intel.c | 12 ++++++++++--
>  arch/x86/kernel/kvm.c       |  3 +++
>  3 files changed, 15 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
> index a57f00f1d5b5..5d5b488b4b45 100644
> --- a/arch/x86/include/asm/cpu.h
> +++ b/arch/x86/include/asm/cpu.h
> @@ -42,12 +42,14 @@ unsigned int x86_model(unsigned int sig);
>  unsigned int x86_stepping(unsigned int sig);
>  #ifdef CONFIG_CPU_SUP_INTEL
>  extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
> +extern void __init split_lock_setup(bool fatal);
>  extern void switch_to_sld(unsigned long tifn);
>  extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
>  extern bool handle_guest_split_lock(unsigned long ip);
>  extern bool split_lock_virt_switch(bool on);
>  #else
>  static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
> +static inline void __init split_lock_setup(bool fatal) {}
>  static inline void switch_to_sld(unsigned long tifn) {}
>  static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
>  {
> diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
> index 1e2a74e8c592..02e24134b9b5 100644
> --- a/arch/x86/kernel/cpu/intel.c
> +++ b/arch/x86/kernel/cpu/intel.c
> @@ -996,12 +996,18 @@ static bool split_lock_verify_msr(bool on)
>         return ctrl == tmp;
>  }
>
> -static void __init split_lock_setup(void)
> +void __init split_lock_setup(bool fatal)
>  {
>         enum split_lock_detect_state state = sld_warn;
>         char arg[20];
>         int i, ret;
>
> +       if (fatal) {
> +               state = sld_fatal;
> +               pr_info("forced on, sending SIGBUS on user-space split_locks\n");
> +               goto set_cap;
> +       }
> +
>         if (!split_lock_verify_msr(false)) {
>                 pr_info("MSR access failed: Disabled\n");
>                 return;
> @@ -1037,6 +1043,7 @@ static void __init split_lock_setup(void)
>                 return;
>         }
>
> +set_cap:
>         setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
>         if (state == sld_fatal)
>                 setup_force_cpu_cap(X86_FEATURE_SLD_FATAL);
> @@ -1161,6 +1168,7 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
>         const struct x86_cpu_id *m;
>         u64 ia32_core_caps;
>
> +       /* Note, paravirt support can enable SLD, e.g., see kvm_guest_init(). */
>         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
>                 return;
>
> @@ -1182,5 +1190,5 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
>                 return;
>         }
>
> -       split_lock_setup();
> +       split_lock_setup(false);
>  }
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 6efe0410fb72..489ea89e2e8e 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -670,6 +670,9 @@ static void __init kvm_guest_init(void)
>          * overcommitted.
>          */
>         hardlockup_detector_disable();
> +
> +       if (kvm_para_has_feature(KVM_FEATURE_SPLIT_LOCK_DETECT))
> +               split_lock_setup(kvm_para_has_hint(KVM_HINTS_SLD_FATAL));
>  }
>
>  static noinline uint32_t __kvm_cpuid_base(void)
> --
> 2.18.2
>
Xiaoyao Li May 11, 2020, 1:11 a.m. UTC | #2
On 5/10/2020 1:15 PM, Andy Lutomirski wrote:
> On Fri, May 8, 2020 at 8:04 PM Xiaoyao Li <xiaoyao.li@intel.com> wrote:
>>
>> When running as a guest, enumerating the split lock detection feature
>> through the CPU model is not reliable, since the CPU model is
>> configurable by the host VMM.
>>
>> If running on KVM, the feature can be enumerated through
>> KVM_FEATURE_SPLIT_LOCK_DETECT,
> 
> This needs crystal clear documentation.  What, exactly, is the host
> telling the guest if it sets this flag?
> 
>> and if KVM_HINTS_SLD_FATAL is set, the
>> guest must be put into sld_fatal mode.
> 
> 
> This needs much better docs.  Do you mean:
> 
> "If KVM_HINTS_SLD_FATAL is set, then the guest will get #AC if it does
> a split-lock regardless of what is written to MSR_TEST_CTRL?"
> 

Hi Andy,

KVM_FEATURE_SPLIT_LOCK_DETECT, KVM_HINTS_SLD_FATAL and their docs are 
introduced in Patch 5. Do I still need to explain them in detail in this 
patch?
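
For readers who do not have patch 5 at hand: the feature and the hint are just
bits in KVM's paravirt CPUID leaf, which the existing kvm_para_has_feature()
and kvm_para_has_hint() helpers read from EAX and EDX of CPUID 0x40000001. A
rough userspace sketch of that discovery step is below; the two bit numbers
are placeholders, since the real definitions are introduced by patch 5.

#include <stdbool.h>
#include <stdio.h>
#include <cpuid.h>

#define KVM_CPUID_FEATURES	0x40000001u

/* Placeholder bit positions; the real definitions are added by patch 5. */
#define KVM_FEATURE_SPLIT_LOCK_DETECT_BIT	14
#define KVM_HINTS_SLD_FATAL_BIT			2

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/*
	 * Real code first validates the KVM signature at leaf 0x40000000
	 * (kvm_cpuid_base()); that check is skipped here for brevity.
	 */
	__cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);

	/* Features are reported in EAX, hints in EDX. */
	bool has_sld   = (eax >> KVM_FEATURE_SPLIT_LOCK_DETECT_BIT) & 1;
	bool fatal_sld = (edx >> KVM_HINTS_SLD_FATAL_BIT) & 1;

	printf("SLD advertised: %d, fatal hint: %d\n", has_sld, fatal_sld);
	return 0;
}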

Patch

diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index a57f00f1d5b5..5d5b488b4b45 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -42,12 +42,14 @@  unsigned int x86_model(unsigned int sig);
 unsigned int x86_stepping(unsigned int sig);
 #ifdef CONFIG_CPU_SUP_INTEL
 extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
+extern void __init split_lock_setup(bool fatal);
 extern void switch_to_sld(unsigned long tifn);
 extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
 extern bool handle_guest_split_lock(unsigned long ip);
 extern bool split_lock_virt_switch(bool on);
 #else
 static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
+static inline void __init split_lock_setup(bool fatal) {}
 static inline void switch_to_sld(unsigned long tifn) {}
 static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1e2a74e8c592..02e24134b9b5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -996,12 +996,18 @@  static bool split_lock_verify_msr(bool on)
 	return ctrl == tmp;
 }
 
-static void __init split_lock_setup(void)
+void __init split_lock_setup(bool fatal)
 {
 	enum split_lock_detect_state state = sld_warn;
 	char arg[20];
 	int i, ret;
 
+	if (fatal) {
+		state = sld_fatal;
+		pr_info("forced on, sending SIGBUS on user-space split_locks\n");
+		goto set_cap;
+	}
+
 	if (!split_lock_verify_msr(false)) {
 		pr_info("MSR access failed: Disabled\n");
 		return;
@@ -1037,6 +1043,7 @@  static void __init split_lock_setup(void)
 		return;
 	}
 
+set_cap:
 	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
 	if (state == sld_fatal)
 		setup_force_cpu_cap(X86_FEATURE_SLD_FATAL);
@@ -1161,6 +1168,7 @@  void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
 	const struct x86_cpu_id *m;
 	u64 ia32_core_caps;
 
+	/* Note, paravirt support can enable SLD, e.g., see kvm_guest_init(). */
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return;
 
@@ -1182,5 +1190,5 @@  void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
 		return;
 	}
 
-	split_lock_setup();
+	split_lock_setup(false);
 }
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6efe0410fb72..489ea89e2e8e 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -670,6 +670,9 @@  static void __init kvm_guest_init(void)
 	 * overcommitted.
 	 */
 	hardlockup_detector_disable();
+
+	if (kvm_para_has_feature(KVM_FEATURE_SPLIT_LOCK_DETECT))
+		split_lock_setup(kvm_para_has_hint(KVM_HINTS_SLD_FATAL));
 }
 
 static noinline uint32_t __kvm_cpuid_base(void)