Message ID | 20160718143020.14828-2-mdontu@bitdefender.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 18/07/16 15:30, Mihai Donțu wrote: > @@ -4409,6 +4409,10 @@ x86_emulate( > case 0x6f: /* movq mm/m64,mm */ > /* {,v}movdq{a,u} xmm/m128,xmm */ > /* vmovdq{a,u} ymm/m256,ymm */ > + case 0x7e: /* movd mm,r/m32 */ > /* movq mm,r/m64 */ > /* {,v}movd xmm,r/m32 */ > /* {,v}movq xmm,r/m64 */ This exposes a vulnerability where a guest can clobber local state in x86_emulate, by specifying registers such as %ebx as the destination. You must either 1) Move this case up above the fail_if(ea.type != OP_MEM); check, or 2) Modify the stub logic to convert a GPR destination to a memory address pointing into _regs. ~Andrew
On Monday 18 July 2016 15:57:09 Andrew Cooper wrote: > On 18/07/16 15:30, Mihai Donțu wrote: > > @@ -4409,6 +4409,10 @@ x86_emulate( > > case 0x6f: /* movq mm/m64,mm */ > > /* {,v}movdq{a,u} xmm/m128,xmm */ > > /* vmovdq{a,u} ymm/m256,ymm */ > > + case 0x7e: /* movd mm,r/m32 */ > > + /* movq mm,r/m64 */ > > + /* {,v}movd xmm,r/m32 */ > > + /* {,v}movq xmm,r/m64 */ > > This exposes a vulnerability where a guest can clobber local state in > x86_emulate, by specifying registers such as %ebx as the destination. > > You must either > 1) Move this case up above the fail_if(ea.type != OP_MEM); check, or > 2) modify the stub logic to convert a GPR destination to a memory > address pointing into _regs. I'm taking a look at (2) as it feels like the best approach. If I'm not making any good progress in the coming days, I'll fall back to (1). Thank you,
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c index 0301235..2a56a67 100644 --- a/xen/arch/x86/x86_emulate/x86_emulate.c +++ b/xen/arch/x86/x86_emulate/x86_emulate.c @@ -204,7 +204,7 @@ static uint8_t twobyte_table[256] = { /* 0x60 - 0x6F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps|ModRM, /* 0x70 - 0x7F */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps|ModRM, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps|ModRM, ImplicitOps|ModRM, /* 0x80 - 0x87 */ ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, @@ -4409,6 +4409,10 @@ x86_emulate( case 0x6f: /* movq mm/m64,mm */ /* {,v}movdq{a,u} xmm/m128,xmm */ /* vmovdq{a,u} ymm/m256,ymm */ + case 0x7e: /* movd mm,r/m32 */ + /* movq mm,r/m64 */ + /* {,v}movd xmm,r/m32 */ + /* {,v}movq xmm,r/m64 */ case 0x7f: /* movq mm,mm/m64 */ /* {,v}movdq{a,u} xmm,xmm/m128 */ /* vmovdq{a,u} ymm,ymm/m256 */ @@ -4432,7 +4436,17 @@ x86_emulate( host_and_vcpu_must_have(sse2); buf[0] = 0x66; /* SSE */ get_fpu(X86EMUL_FPU_xmm, &fic); - ea.bytes = (b == 0xd6 ? 8 : 16); + switch ( b ) + { + case 0x7e: + ea.bytes = 4; + break; + case 0xd6: + ea.bytes = 8; + break; + default: + ea.bytes = 16; + } break; case vex_none: if ( b != 0xe7 ) @@ -4452,7 +4466,17 @@ x86_emulate( ((vex.pfx != vex_66) && (vex.pfx != vex_f3))); host_and_vcpu_must_have(avx); get_fpu(X86EMUL_FPU_ymm, &fic); - ea.bytes = (b == 0xd6 ? 8 : 16 << vex.l); + switch ( b ) + { + case 0x7e: + ea.bytes = 4; + break; + case 0xd6: + ea.bytes = 8; + break; + default: + ea.bytes = 16 << vex.l; + } } if ( ea.type == OP_MEM ) {