Message ID | 22302ebd-ea41-afd8-cc5d-fb209786ba4c@citrix.com (mailing list archive) |
---|---|
State | New, archived |
>>> On 13.10.16 at 15:26, <andrew.cooper3@citrix.com> wrote:
> On 13/10/16 13:57, Jan Beulich wrote:
>> Commit 6dc9ac9f52 ("x86emul: check alignment of SSE and AVX memory
>> operands") didn't consider a specific AMD mode: Mis-alignment #GP
>> faults can be masked on some of their hardware.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> This highlights that the following CPUID dependency change is also required
>
> diff --git a/xen/tools/gen-cpuid.py b/xen/tools/gen-cpuid.py
> index 33e68eb..e803654 100755
> --- a/xen/tools/gen-cpuid.py
> +++ b/xen/tools/gen-cpuid.py
> @@ -185,8 +185,9 @@ def crunch_numbers(state):
>          # the first place.
>          APIC: [X2APIC],
>
> -        # AMD built MMXExtentions and 3DNow as extentions to MMX.
> -        MMX: [MMXEXT, _3DNOW],
> +        # AMD built MMXExtentions, 3DNow and SSE Misalignment as extensions to
> +        # MMX.
> +        MMX: [MMXEXT, _3DNOW, MISALIGNSSE],
>
>          # The FXSAVE/FXRSTOR instructions were introduced into hardware before
>          # SSE, which is why they behave differently based on %CR4.OSFXSAVE and

Hmm, for one this is an orthogonal change, so doesn't belong in this
patch. And then - why is this an extension to MMX (which doesn't
have any alignment requirements anyway) rather than SSE?

>> @@ -4675,7 +4679,13 @@ x86_emulate(
>>              ea.bytes = vex.pfx & VEX_PREFIX_DOUBLE_MASK ? 8 : 4;
>>          if ( ea.type == OP_MEM )
>>          {
>> -            generate_exception_if((b >= 0x28) &&
>> +            uint32_t mxcsr = 0;
>> +
>> +            if ( b < 0x28 )
>> +                mxcsr = MXCSR_MM;
>> +            else if ( vcpu_has_misalignsse() )
>> +                asm ( "stmxcsr %0" : "=m" (mxcsr) );
>> +            generate_exception_if(!(mxcsr & MXCSR_MM) &&
>>                                    !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
>>                                                ctxt, ops),
>>                                    EXC_GP, 0);
>> @@ -4955,7 +4965,13 @@ x86_emulate(
>>          }
>>          if ( ea.type == OP_MEM )
>>          {
>> -            generate_exception_if((vex.pfx == vex_66) &&
>> +            uint32_t mxcsr = 0;
>> +
>> +            if ( vex.pfx != vex_66 )
>> +                mxcsr = MXCSR_MM;
>> +            else if ( vcpu_has_misalignsse() )
>> +                asm ( "stmxcsr %0" : "=m" (mxcsr) );
>> +            generate_exception_if(!(mxcsr & MXCSR_MM) &&
>>                                    !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
>>                                                ctxt, ops),
>>                                    EXC_GP, 0);
>
> According to the docs, we should also be possibly raising #AC here.

Well, you've said the same in a different context not so long ago,
and my answer is then also the same: As long as we don't do any
#AC generation, I see no reason why we would want to create an
exception here.

Jan
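For readers following the hunks above, here is a minimal, self-contained C sketch of the decision they encode: raise #GP only when an alignment check applies, the operand is misaligned, and MXCSR.MM is clear. The cpu_has_misalignsse() helper, the simplified misalign_faults() interface and the bit-17 position of MXCSR_MM are assumptions of this sketch (taken from AMD's documentation), not code lifted from the patch.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MXCSR_MM (1u << 17)   /* assumed: AMD's misaligned-exception mask bit */

/* Stand-in for vcpu_has_misalignsse(): CPUID 0x80000001, ECX bit 7. */
static bool cpu_has_misalignsse(void)
{
    uint32_t eax, ebx, ecx, edx;

    asm volatile ( "cpuid"
                   : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                   : "0" (0x80000001u) );
    return ecx & (1u << 7);
}

/*
 * Mirrors the structure of the hunks: pre-load MXCSR_MM when no alignment
 * check applies at all, otherwise read the live MXCSR on MISALIGNSSE
 * hardware, and fault only if MM is clear and the access is misaligned.
 */
static bool misalign_faults(bool needs_check, bool aligned)
{
    uint32_t mxcsr = 0;

    if ( !needs_check )
        mxcsr = MXCSR_MM;
    else if ( cpu_has_misalignsse() )
        asm ( "stmxcsr %0" : "=m" (mxcsr) );

    return !(mxcsr & MXCSR_MM) && !aligned;
}

int main(void)
{
    printf("misaligned, check required : #GP=%d\n", misalign_faults(true, false));
    printf("misaligned, no check needed: #GP=%d\n", misalign_faults(false, false));
    printf("aligned,    check required : #GP=%d\n", misalign_faults(true, true));
    return 0;
}
```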
On 14/10/16 07:20, Jan Beulich wrote:
>>>> On 13.10.16 at 15:26, <andrew.cooper3@citrix.com> wrote:
>> On 13/10/16 13:57, Jan Beulich wrote:
>>> Commit 6dc9ac9f52 ("x86emul: check alignment of SSE and AVX memory
>>> operands") didn't consider a specific AMD mode: Mis-alignment #GP
>>> faults can be masked on some of their hardware.
>>>
>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> This highlights that the following CPUID dependency change is also required
>>
>> diff --git a/xen/tools/gen-cpuid.py b/xen/tools/gen-cpuid.py
>> index 33e68eb..e803654 100755
>> --- a/xen/tools/gen-cpuid.py
>> +++ b/xen/tools/gen-cpuid.py
>> @@ -185,8 +185,9 @@ def crunch_numbers(state):
>>          # the first place.
>>          APIC: [X2APIC],
>>
>> -        # AMD built MMXExtentions and 3DNow as extentions to MMX.
>> -        MMX: [MMXEXT, _3DNOW],
>> +        # AMD built MMXExtentions, 3DNow and SSE Misalignment as extensions to
>> +        # MMX.
>> +        MMX: [MMXEXT, _3DNOW, MISALIGNSSE],
>>
>>          # The FXSAVE/FXRSTOR instructions were introduced into hardware before
>>          # SSE, which is why they behave differently based on %CR4.OSFXSAVE and
> Hmm, for one this is an orthogonal change, so doesn't belong in this
> patch. And then - why is this an extension to MMX (which doesn't
> have any alignment requirements anyway) rather than SSE?

Sorry - I have no idea why I suggested MMX. It should have been SSE.

>
>>> @@ -4675,7 +4679,13 @@ x86_emulate(
>>>              ea.bytes = vex.pfx & VEX_PREFIX_DOUBLE_MASK ? 8 : 4;
>>>          if ( ea.type == OP_MEM )
>>>          {
>>> -            generate_exception_if((b >= 0x28) &&
>>> +            uint32_t mxcsr = 0;
>>> +
>>> +            if ( b < 0x28 )
>>> +                mxcsr = MXCSR_MM;
>>> +            else if ( vcpu_has_misalignsse() )
>>> +                asm ( "stmxcsr %0" : "=m" (mxcsr) );
>>> +            generate_exception_if(!(mxcsr & MXCSR_MM) &&
>>>                                    !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
>>>                                                ctxt, ops),
>>>                                    EXC_GP, 0);
>>> @@ -4955,7 +4965,13 @@ x86_emulate(
>>>          }
>>>          if ( ea.type == OP_MEM )
>>>          {
>>> -            generate_exception_if((vex.pfx == vex_66) &&
>>> +            uint32_t mxcsr = 0;
>>> +
>>> +            if ( vex.pfx != vex_66 )
>>> +                mxcsr = MXCSR_MM;
>>> +            else if ( vcpu_has_misalignsse() )
>>> +                asm ( "stmxcsr %0" : "=m" (mxcsr) );
>>> +            generate_exception_if(!(mxcsr & MXCSR_MM) &&
>>>                                    !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
>>>                                                ctxt, ops),
>>>                                    EXC_GP, 0);
>> According to the docs, we should also be possibly raising #AC here.
> Well, you've said the same in a different context not so long ago,
> and my answer is then also the same: As long as we don't do any
> #AC generation, I see no reason why we would want to create an
> exception here.

Ah yes. I had managed to let that slip my mind.

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
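The commit message's "can be masked" refers to the guest setting MXCSR.MM on MISALIGNSSE-capable AMD hardware, which is exactly the bit the emulator change consults. Below is a small userspace sketch of that masking from the software side; the CPUID leaf 0x80000001 ECX bit 7 feature flag and the bit-17 position of MM are taken from AMD's documentation rather than from the patch, and on hardware without MISALIGNSSE the bit is reserved, so the sketch leaves MXCSR untouched there.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MXCSR_MM (1u << 17)   /* assumed bit position of MXCSR.MM */

static bool cpu_has_misalignsse(void)
{
    uint32_t eax, ebx, ecx, edx;

    asm volatile ( "cpuid"
                   : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                   : "0" (0x80000001u) );
    return ecx & (1u << 7);   /* AMD "MisAlignSse" feature flag */
}

int main(void)
{
    uint32_t mxcsr;

    asm ( "stmxcsr %0" : "=m" (mxcsr) );
    printf("MXCSR = %#x, MM is %s\n", mxcsr,
           (mxcsr & MXCSR_MM) ? "set (misalignment #GP masked)" : "clear");

    if ( !cpu_has_misalignsse() )
    {
        /* MM is a reserved bit here; LDMXCSR would itself raise #GP. */
        puts("MISALIGNSSE not advertised - leaving MXCSR alone");
        return 0;
    }

    mxcsr |= MXCSR_MM;                         /* mask misalignment faults */
    asm volatile ( "ldmxcsr %0" :: "m" (mxcsr) );
    puts("MXCSR.MM set: misaligned SSE memory operands no longer #GP");
    return 0;
}
```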