From: Dave Hansen <dave.hansen@linux.intel.com>
A bunch of crypto code open-codes the same CPU feature detection
logic. Some instances get it right, but most probably get it wrong.
For instance, some check for XFEATURE_MASK_YMM, but don't check
for AVX itself. Especially if the *software* X86_FEATURE_AVX bit
is cleared, we might end up with XFEATURE_MASK_YMM set, but we
do not want to support AVX.
These helpers also formally check for SSE2 before checking for AVX,
and for AVX before checking for AVX2, as the SDM suggests.
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---
b/arch/x86/include/asm/feature-checks.h | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
@@ -0,0 +1,32 @@
+#ifndef _ASM_X86_FEATURE_CHECKS_H
+#define _ASM_X86_FEATURE_CHECKS_H
+
+/*
+ * Helpers to check CPU *and* OS support for SSE2/AVX/AVX2, for code
+ * (e.g. crypto glue) that selects optimized implementations at boot.
+ */
+
+static inline bool __init sse2_usable(void)
+{
+	/* SSE2 needs the CPUID bit plus kernel XSAVE SSE state support. */
+	if (!cpu_has_xmm2 || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
+		return false;
+	return true;
+}
+
+static inline bool __init avx_usable(void)
+{
+	/* AVX needs SSE2, the AVX+OSXSAVE CPUID bits, and OS YMM state. */
+	if (!sse2_usable() || !cpu_has_avx || !cpu_has_osxsave)
+		return false;
+	return cpu_has_xfeatures(XFEATURE_MASK_YMM, NULL);
+}
+
+static inline bool __init avx2_usable(void)
+{
+	/* AVX2 requires a usable AVX base plus the AVX2 CPUID bit. */
+	if (!avx_usable() || !cpu_has_avx2)
+		return false;
+
+	return true;
+}
+
+#endif /* _ASM_X86_FEATURE_CHECKS_H */
_