
[v3,4/8] x86/split_lock: Add and export split_lock_detect_enabled() and split_lock_detect_fatal()

Message ID 20200206070412.17400-5-xiaoyao.li@intel.com (mailing list archive)
State New, archived
Series kvm/split_lock: Add feature split lock detection support in kvm

Commit Message

Xiaoyao Li Feb. 6, 2020, 7:04 a.m. UTC
These two functions will be used by KVM to check the host's
sld_state.

Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
---
 arch/x86/include/asm/cpu.h  |  4 ++++
 arch/x86/kernel/cpu/intel.c | 12 ++++++++++++
 2 files changed, 16 insertions(+)
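
For illustration, a hypothetical sketch of how a KVM-side caller might
consult these helpers (the wrapper name and the policy it encodes are
assumptions for the sketch; only the two helpers come from this patch):

	/* Hypothetical KVM-side usage -- not part of this series. */
	static bool guest_can_use_split_lock_detect(void)
	{
		/* Host has SLD off: nothing to virtualize. */
		if (!split_lock_detect_enabled())
			return false;

		/*
		 * In fatal mode the host treats split locks as fatal
		 * (SIGBUS/panic), so the guest must not be allowed to
		 * turn SLD off for itself.
		 */
		return !split_lock_detect_fatal();
	}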

Comments

Sean Christopherson March 3, 2020, 6:59 p.m. UTC | #1
On Thu, Feb 06, 2020 at 03:04:08PM +0800, Xiaoyao Li wrote:
> These two functions will be used by KVM to check the host's
> sld_state.
> 
> Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
> ---
>  arch/x86/include/asm/cpu.h  |  4 ++++
>  arch/x86/kernel/cpu/intel.c | 12 ++++++++++++
>  2 files changed, 16 insertions(+)
> 
> diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
> index 2b20829db450..f5172dbd3f01 100644
> --- a/arch/x86/include/asm/cpu.h
> +++ b/arch/x86/include/asm/cpu.h
> @@ -46,6 +46,8 @@ unsigned int x86_stepping(unsigned int sig);
>  extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
>  extern void switch_to_sld(unsigned long tifn);
>  extern bool handle_user_split_lock(unsigned long ip);
> +extern bool split_lock_detect_enabled(void);
> +extern bool split_lock_detect_fatal(void);
>  #else
>  static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
>  static inline void switch_to_sld(unsigned long tifn) {}
> @@ -53,5 +55,7 @@ static inline bool handle_user_split_lock(unsigned long ip)
>  {
>  	return false;
>  }
> +static inline bool split_lock_detect_enabled(void) { return false; }
> +static inline bool split_lock_detect_fatal(void) { return false; }
>  #endif
>  #endif /* _ASM_X86_CPU_H */
> diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
> index ff27d026cb4a..b67b46ea66df 100644
> --- a/arch/x86/kernel/cpu/intel.c
> +++ b/arch/x86/kernel/cpu/intel.c
> @@ -1131,3 +1131,15 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
>  	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
>  		split_lock_setup();
>  }
> +
> +bool split_lock_detect_enabled(void)
> +{
> +	return sld_state != sld_off;
> +}
> +EXPORT_SYMBOL_GPL(split_lock_detect_enabled);

Hmm, ideally this would be static inline.  Patch 8 (to expose SLD to the
guest) queries this in vmx_vcpu_run(), and I'd prefer to avoid the extra
CALL+RET in that path.
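
A minimal sketch of that direction (an assumption, not code from the
thread): make sld_state and its enum visible in cpu.h so the check
compiles down to a direct load.

	/* Sketch only: assumes the enum and the sld_state declaration
	 * move into arch/x86/include/asm/cpu.h instead of being private
	 * to intel.c. */
	enum split_lock_detect_state {
		sld_off = 0,
		sld_warn,
		sld_fatal,
	};
	extern enum split_lock_detect_state sld_state;

	static inline bool split_lock_detect_enabled(void)
	{
		/* Direct load, no CALL+RET on the vcpu_run path. */
		return sld_state != sld_off;
	}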

> +bool split_lock_detect_fatal(void)
> +{
> +	return sld_state == sld_fatal;
> +}
> +EXPORT_SYMBOL_GPL(split_lock_detect_fatal);

split_lock_detect_fatal() isn't used in this series, so it shouldn't be added.

> -- 
> 2.23.0
>