[v2] x86: drop cpu_has_sse{,2}

Message ID 586E7072020000780012D7AC@prv-mh.provo.novell.com (mailing list archive)
State New, archived

Commit Message

Jan Beulich Jan. 5, 2017, 3:12 p.m. UTC
Commit dc88221c97 ("x86: rename XMM* features to SSE*") pointlessly
added them - these features are always available on 64-bit CPUs. (Let's
not assume this for MMX though in at least the insn emulator.)

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Add a test harness comment clarifying host_and_vcpu_must_have() vs
    vcpu_must_have() use there.
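
[Editorial context, not part of the patch: the AMD64 / Intel 64 baseline includes SSE and SSE2, so any CPU capable of long mode reports CPUID.01H:EDX bits 25 and 26 as set. That is why checking them on the host side is redundant in 64-bit Xen. A minimal, illustrative userspace check, assuming GCC/clang's <cpuid.h>:]

    /* Illustrative only -- not part of the patch. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
            return 1;

        /*
         * CPUID.01H:EDX bit 25 = SSE, bit 26 = SSE2.  Both belong to the
         * 64-bit baseline, so a long-mode-capable CPU always reports 1/1.
         */
        printf("SSE:  %u\n", (edx >> 25) & 1);
        printf("SSE2: %u\n", (edx >> 26) & 1);
        return 0;
    }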

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1326,6 +1326,11 @@ static bool vcpu_has(
     vcpu_must_have(feat); \
 })
 #else
+/*
+ * For the test harness both are fine to be used interchangeably, i.e.
+ * features known to always be available (e.g. SSE/SSE2) to (64-bit) Xen
+ * may be checked for by just vcpu_must_have().
+ */
 #define host_and_vcpu_must_have(feat) vcpu_must_have(feat)
 #endif
 
@@ -4910,9 +4915,9 @@ x86_emulate(
         if ( vex.opcx == vex_none )
         {
             if ( vex.pfx & VEX_PREFIX_DOUBLE_MASK )
-                host_and_vcpu_must_have(sse2);
+                vcpu_must_have(sse2);
             else
-                host_and_vcpu_must_have(sse);
+                vcpu_must_have(sse);
             ea.bytes = 16;
             SET_SSE_PREFIX(buf[0], vex.pfx);
             get_fpu(X86EMUL_FPU_xmm, &fic);
@@ -5183,7 +5188,7 @@ x86_emulate(
             {
             case vex_66:
             case vex_f3:
-                host_and_vcpu_must_have(sse2);
+                vcpu_must_have(sse2);
                 /* Converting movdqu to movdqa here: Our buffer is aligned. */
                 buf[0] = 0x66;
                 get_fpu(X86EMUL_FPU_xmm, &fic);
@@ -5193,7 +5198,7 @@ x86_emulate(
                 if ( b != 0xe7 )
                     host_and_vcpu_must_have(mmx);
                 else
-                    host_and_vcpu_must_have(sse);
+                    vcpu_must_have(sse);
                 get_fpu(X86EMUL_FPU_mmx, &fic);
                 ea.bytes = 8;
                 break;
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -38,8 +38,6 @@
 #define cpu_has_sep		boot_cpu_has(X86_FEATURE_SEP)
 #define cpu_has_mtrr		1
 #define cpu_has_mmx		1
-#define cpu_has_sse		boot_cpu_has(X86_FEATURE_SSE)
-#define cpu_has_sse2		boot_cpu_has(X86_FEATURE_SSE2)
 #define cpu_has_sse3		boot_cpu_has(X86_FEATURE_SSE3)
 #define cpu_has_sse4_2		boot_cpu_has(X86_FEATURE_SSE4_2)
 #define cpu_has_htt		boot_cpu_has(X86_FEATURE_HTT)
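
[Editorial sketch, not the real Xen definitions: the distinction the v2 note and the new comment refer to is that, in the hypervisor build, host_and_vcpu_must_have() also checks the host's cpu_has_* flag, whereas vcpu_must_have() only consults the guest-visible CPUID policy; in the test harness the former collapses into the latter. The helper names and the __XEN__ guard below are used purely for illustration.]

    /* Simplified sketch -- NOT the actual Xen macros. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool host_has(const char *feat) { return feat != NULL; } /* stand-in for cpu_has_<feat> */
    static bool vcpu_has(const char *feat) { return feat != NULL; } /* stand-in for the emulator's vcpu_has() */

    #define vcpu_must_have(feat) \
        do { if ( !vcpu_has(#feat) ) return -1; } while ( 0 )

    #ifdef __XEN__
    /* Hypervisor build: the feature must also be present on the host. */
    # define host_and_vcpu_must_have(feat) \
        do { if ( !host_has(#feat) ) return -1; vcpu_must_have(feat); } while ( 0 )
    #else
    /* Test harness: the two checks collapse into one, as the new comment notes. */
    # define host_and_vcpu_must_have(feat) vcpu_must_have(feat)
    #endif

    static int emulate_sse2_insn(void)
    {
        /*
         * SSE2 is architecturally guaranteed on a 64-bit host, so only the
         * guest's CPUID policy needs checking -- hence the patch switches
         * such sites to vcpu_must_have().
         */
        vcpu_must_have(sse2);
        return 0;
    }

    int main(void)
    {
        printf("emulate_sse2_insn() -> %d\n", emulate_sse2_insn());
        return 0;
    }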

Comments

Andrew Cooper Jan. 5, 2017, 3:19 p.m. UTC | #1
On 05/01/17 15:12, Jan Beulich wrote:
> Commit dc88221c97 ("x86: rename XMM* features to SSE*") pointlessly
> added them - these features are always available on 64-bit CPUs. (Let's
> not assume this for MMX though in at least the insn emulator.)
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>