Message ID | 57D18700020000780010D264@prv-mh.provo.novell.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 08/09/16 14:42, Jan Beulich wrote: > To make this complete, also add support for SLDT and STR. Note that by > just looking at the guest CR4 bit, this is independent of actually > making available the UMIP feature to guests. > > Signed-off-by: Jan Beulich <jbeulich@suse.com> > > --- a/xen/arch/x86/x86_emulate/x86_emulate.c > +++ b/xen/arch/x86/x86_emulate/x86_emulate.c > @@ -182,7 +182,7 @@ static const opcode_desc_t opcode_table[ > > static const opcode_desc_t twobyte_table[256] = { > /* 0x00 - 0x07 */ > - SrcMem16|ModRM, ImplicitOps|ModRM, ModRM, ModRM, > + ModRM, ImplicitOps|ModRM, ModRM, ModRM, > 0, ImplicitOps, ImplicitOps, ImplicitOps, > /* 0x08 - 0x0F */ > ImplicitOps, ImplicitOps, 0, ImplicitOps, > @@ -421,6 +421,7 @@ typedef union { > /* Control register flags. */ > #define CR0_PE (1<<0) > #define CR4_TSD (1<<2) > +#define CR4_UMIP (1<<11) > > /* EFLAGS bit definitions. */ > #define EFLG_VIP (1<<20) > @@ -1484,6 +1485,17 @@ static bool is_aligned(enum x86_segment > return !((reg.base + offs) & (size - 1)); > } > > +static bool is_umip(struct x86_emulate_ctxt *ctxt, > + const struct x86_emulate_ops *ops) is_umip is an odd way of phrasing this. umip_enabled() or is_umip_enabled() would be better. > +{ > + unsigned long cr4; > + > + /* Intentionally not using mode_ring0() here to avoid its fail_if(). */ > + return get_cpl(ctxt, ops) > 0 && > + ops->read_cr && ops->read_cr(4, &cr4, ctxt) == X86EMUL_OKAY && > + (cr4 & CR4_UMIP); > +} > + > /* Inject a software interrupt/exception, emulating if needed. 
*/ > static int inject_swint(enum x86_swint_type type, > uint8_t vector, uint8_t insn_len, > @@ -2051,10 +2063,20 @@ x86_decode( > break; > > case ext_0f: > - case ext_0f3a: > - case ext_8f08: > - case ext_8f09: > - case ext_8f0a: > + switch ( b ) > + { > + case 0x00: /* Grp6 */ > + switch ( modrm_reg & 6 ) > + { > + case 0: > + d |= DstMem | SrcImplicit | Mov; > + break; > + case 2: case 4: > + d |= SrcMem16; > + break; > + } > + break; > + } > break; > > case ext_0f38: > @@ -2070,6 +2092,12 @@ x86_decode( > } > break; > > + case ext_0f3a: > + case ext_8f08: > + case ext_8f09: > + case ext_8f0a: > + break; > + > default: > ASSERT_UNREACHABLE(); > } > @@ -4177,14 +4205,31 @@ x86_emulate( > } > break; > > - case X86EMUL_OPC(0x0f, 0x00): /* Grp6 */ > - fail_if((modrm_reg & 6) != 2); > + case X86EMUL_OPC(0x0f, 0x00): /* Grp6 */ { Newline for { > + enum x86_segment seg = (modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr; > + > + fail_if(modrm_reg & 4); > generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1); > - generate_exception_if(!mode_ring0(), EXC_GP, 0); > - if ( (rc = load_seg((modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr, > - src.val, 0, NULL, ctxt, ops)) != 0 ) > - goto done; > + if ( modrm_reg & 2 ) This needs to be (modrm_reg & 6) == 2. Otherwise, the /6 and /7 encodings will also raise #GP when they should raise #UD Actually thinking about it, could we just have a full switch here like other Grp $N decodes? 
~Andrew > + { > + generate_exception_if(!mode_ring0(), EXC_GP, 0); > + if ( (rc = load_seg(seg, src.val, 0, NULL, ctxt, ops)) != 0 ) > + goto done; > + } > + else > + { > + struct segment_register reg; > + > + generate_exception_if(is_umip(ctxt, ops), EXC_GP, 0); > + fail_if(!ops->read_segment); > + if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 ) > + goto done; > + dst.val = reg.sel; > + if ( dst.type == OP_MEM ) > + dst.bytes = 2; > + } break; > + } > > case X86EMUL_OPC(0x0f, 0x01): /* Grp7 */ { > struct segment_register reg; > @@ -4282,6 +4327,7 @@ x86_emulate( > case 0: /* sgdt */ > case 1: /* sidt */ > generate_exception_if(ea.type != OP_MEM, EXC_UD, -1); > + generate_exception_if(is_umip(ctxt, ops), EXC_GP, 0); > fail_if(ops->read_segment == NULL); > if ( (rc = ops->read_segment((modrm_reg & 1) ? > x86_seg_idtr : x86_seg_gdtr, > @@ -4316,6 +4362,7 @@ x86_emulate( > goto done; > break; > case 4: /* smsw */ > + generate_exception_if(is_umip(ctxt, ops), EXC_GP, 0); > ea.bytes = (ea.type == OP_MEM) ? 2 : op_bytes; > dst = ea; > fail_if(ops->read_cr == NULL); > >
>>> On 30.09.16 at 12:32, <andrew.cooper3@citrix.com> wrote: > On 08/09/16 14:42, Jan Beulich wrote: >> @@ -1484,6 +1485,17 @@ static bool is_aligned(enum x86_segment >> return !((reg.base + offs) & (size - 1)); >> } >> >> +static bool is_umip(struct x86_emulate_ctxt *ctxt, >> + const struct x86_emulate_ops *ops) > > is_umip is an odd way of phrasing this. umip_enabled() or > is_umip_enabled() would be better. That would have been my choice if there wasn't the CPL check in here. I prefer to read the 'p' here as "prevented" rather than "prevention". >> @@ -4177,14 +4205,31 @@ x86_emulate( >> } >> break; >> >> - case X86EMUL_OPC(0x0f, 0x00): /* Grp6 */ >> - fail_if((modrm_reg & 6) != 2); >> + case X86EMUL_OPC(0x0f, 0x00): /* Grp6 */ { > > Newline for { Yeah, we're kind of inconsistent with that when they are for case statements. >> + enum x86_segment seg = (modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr; >> + >> + fail_if(modrm_reg & 4); >> generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1); >> - generate_exception_if(!mode_ring0(), EXC_GP, 0); >> - if ( (rc = load_seg((modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr, >> - src.val, 0, NULL, ctxt, ops)) != 0 ) >> - goto done; >> + if ( modrm_reg & 2 ) > > This needs to be (modrm_reg & 6) == 2. Otherwise, the /6 and /7 > encodings will also raise #GP when they should raise #UD Note the earlier "fail_if(modrm_reg & 4)". > Actually thinking about it, could we just have a full switch here like > other Grp $N decodes? I can certainly pull this ahead from a later (not yet submitted) patch. Jan
On 30/09/16 13:23, Jan Beulich wrote: >>>> On 30.09.16 at 12:32, <andrew.cooper3@citrix.com> wrote: >> On 08/09/16 14:42, Jan Beulich wrote: >>> @@ -1484,6 +1485,17 @@ static bool is_aligned(enum x86_segment >>> return !((reg.base + offs) & (size - 1)); >>> } >>> >>> +static bool is_umip(struct x86_emulate_ctxt *ctxt, >>> + const struct x86_emulate_ops *ops) >> is_umip is an odd way of phrasing this. umip_enabled() or >> is_umip_enabled() would be better. > That would have been my choice if there wasn't the CPL check in here. > I prefer to read the 'p' here as "prevented" rather then "prevention". In which case, umip_active()? >>> + enum x86_segment seg = (modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr; >>> + >>> + fail_if(modrm_reg & 4); >>> generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1); >>> - generate_exception_if(!mode_ring0(), EXC_GP, 0); >>> - if ( (rc = load_seg((modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr, >>> - src.val, 0, NULL, ctxt, ops)) != 0 ) >>> - goto done; >>> + if ( modrm_reg & 2 ) >> This needs to be (modrm_reg & 6) == 2. Otherwise, the /6 and /7 >> encodings will also raise #GP when they should raise #UD > Note the earlier "fail_if(modrm_reg & 4)". Ah - I hadn't spotted that, which does catch that case, as well as ver{r,w}. > >> Actually thinking about it, could we just have a full switch here like >> other Grp $N decodes? > I can certainly pull this ahead from a later (not yet submitted) > patch. I think this would be best, especially if you already have the code to hand. ~Andrew
>>> On 30.09.16 at 14:27, <andrew.cooper3@citrix.com> wrote: > On 30/09/16 13:23, Jan Beulich wrote: >>>>> On 30.09.16 at 12:32, <andrew.cooper3@citrix.com> wrote: >>> On 08/09/16 14:42, Jan Beulich wrote: >>>> @@ -1484,6 +1485,17 @@ static bool is_aligned(enum x86_segment >>>> return !((reg.base + offs) & (size - 1)); >>>> } >>>> >>>> +static bool is_umip(struct x86_emulate_ctxt *ctxt, >>>> + const struct x86_emulate_ops *ops) >>> is_umip is an odd way of phrasing this. umip_enabled() or >>> is_umip_enabled() would be better. >> That would have been my choice if there wasn't the CPL check in here. >> I prefer to read the 'p' here as "prevented" rather then "prevention". > > In which case, umip_active()? Ah - that's fine. Jan
--- a/xen/arch/x86/x86_emulate/x86_emulate.c +++ b/xen/arch/x86/x86_emulate/x86_emulate.c @@ -182,7 +182,7 @@ static const opcode_desc_t opcode_table[ static const opcode_desc_t twobyte_table[256] = { /* 0x00 - 0x07 */ - SrcMem16|ModRM, ImplicitOps|ModRM, ModRM, ModRM, + ModRM, ImplicitOps|ModRM, ModRM, ModRM, 0, ImplicitOps, ImplicitOps, ImplicitOps, /* 0x08 - 0x0F */ ImplicitOps, ImplicitOps, 0, ImplicitOps, @@ -421,6 +421,7 @@ typedef union { /* Control register flags. */ #define CR0_PE (1<<0) #define CR4_TSD (1<<2) +#define CR4_UMIP (1<<11) /* EFLAGS bit definitions. */ #define EFLG_VIP (1<<20) @@ -1484,6 +1485,17 @@ static bool is_aligned(enum x86_segment return !((reg.base + offs) & (size - 1)); } +static bool is_umip(struct x86_emulate_ctxt *ctxt, + const struct x86_emulate_ops *ops) +{ + unsigned long cr4; + + /* Intentionally not using mode_ring0() here to avoid its fail_if(). */ + return get_cpl(ctxt, ops) > 0 && + ops->read_cr && ops->read_cr(4, &cr4, ctxt) == X86EMUL_OKAY && + (cr4 & CR4_UMIP); +} + /* Inject a software interrupt/exception, emulating if needed. */ static int inject_swint(enum x86_swint_type type, uint8_t vector, uint8_t insn_len, @@ -2051,10 +2063,20 @@ x86_decode( break; case ext_0f: - case ext_0f3a: - case ext_8f08: - case ext_8f09: - case ext_8f0a: + switch ( b ) + { + case 0x00: /* Grp6 */ + switch ( modrm_reg & 6 ) + { + case 0: + d |= DstMem | SrcImplicit | Mov; + break; + case 2: case 4: + d |= SrcMem16; + break; + } + break; + } break; case ext_0f38: @@ -2070,6 +2092,12 @@ x86_decode( } break; + case ext_0f3a: + case ext_8f08: + case ext_8f09: + case ext_8f0a: + break; + default: ASSERT_UNREACHABLE(); } @@ -4177,14 +4205,31 @@ x86_emulate( } break; - case X86EMUL_OPC(0x0f, 0x00): /* Grp6 */ - fail_if((modrm_reg & 6) != 2); + case X86EMUL_OPC(0x0f, 0x00): /* Grp6 */ { + enum x86_segment seg = (modrm_reg & 1) ? 
x86_seg_tr : x86_seg_ldtr; + + fail_if(modrm_reg & 4); generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1); - generate_exception_if(!mode_ring0(), EXC_GP, 0); - if ( (rc = load_seg((modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr, - src.val, 0, NULL, ctxt, ops)) != 0 ) - goto done; + if ( modrm_reg & 2 ) + { + generate_exception_if(!mode_ring0(), EXC_GP, 0); + if ( (rc = load_seg(seg, src.val, 0, NULL, ctxt, ops)) != 0 ) + goto done; + } + else + { + struct segment_register reg; + + generate_exception_if(is_umip(ctxt, ops), EXC_GP, 0); + fail_if(!ops->read_segment); + if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 ) + goto done; + dst.val = reg.sel; + if ( dst.type == OP_MEM ) + dst.bytes = 2; + } break; + } case X86EMUL_OPC(0x0f, 0x01): /* Grp7 */ { struct segment_register reg; @@ -4282,6 +4327,7 @@ x86_emulate( case 0: /* sgdt */ case 1: /* sidt */ generate_exception_if(ea.type != OP_MEM, EXC_UD, -1); + generate_exception_if(is_umip(ctxt, ops), EXC_GP, 0); fail_if(ops->read_segment == NULL); if ( (rc = ops->read_segment((modrm_reg & 1) ? x86_seg_idtr : x86_seg_gdtr, @@ -4316,6 +4362,7 @@ x86_emulate( goto done; break; case 4: /* smsw */ + generate_exception_if(is_umip(ctxt, ops), EXC_GP, 0); ea.bytes = (ea.type == OP_MEM) ? 2 : op_bytes; dst = ea; fail_if(ops->read_cr == NULL);