@@ -190,7 +190,8 @@ static const char *const str_7a1[32] =
[10] = "fzrm", [11] = "fsrs",
[12] = "fsrcs",
- /* 18 */ [19] = "wrmsrns",
+ /* 16 */ [17] = "fred",
+ [18] = "lkgs", [19] = "wrmsrns",
/* 22 */ [23] = "avx-ifma",
};
@@ -326,6 +326,7 @@ static const struct {
{ { 0x00, 0x18 }, { 2, 2 }, T, R }, /* ltr */
{ { 0x00, 0x20 }, { 2, 2 }, T, R }, /* verr */
{ { 0x00, 0x28 }, { 2, 2 }, T, R }, /* verw */
+ { { 0x00, 0x30 }, { 0, 2 }, T, R, pfx_f2 }, /* lkgs */
{ { 0x01, 0x00 }, { 2, 2 }, F, W }, /* sgdt */
{ { 0x01, 0x08 }, { 2, 2 }, F, W }, /* sidt */
{ { 0x01, 0x10 }, { 2, 2 }, F, R }, /* lgdt */
@@ -680,6 +680,10 @@ static int blk(
return x86_emul_blk((void *)offset, p_data, bytes, eflags, state, ctxt);
}

+#ifdef __x86_64__
+static unsigned long gs_base, gs_base_shadow;
+#endif
+
static int read_segment(
enum x86_segment seg,
struct segment_register *reg,
@@ -689,8 +693,30 @@ static int read_segment(
return X86EMUL_UNHANDLEABLE;
memset(reg, 0, sizeof(*reg));
reg->p = 1;
+
+#ifdef __x86_64__
+ if ( seg == x86_seg_gs )
+ reg->base = gs_base;
+#endif
+
+ return X86EMUL_OKAY;
+}
+
+#ifdef __x86_64__
+static int write_segment(
+ enum x86_segment seg,
+ const struct segment_register *reg,
+ struct x86_emulate_ctxt *ctxt)
+{
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+
+ if ( seg == x86_seg_gs )
+ gs_base = reg->base;
+
return X86EMUL_OKAY;
}
+#endif

static int read_msr(
unsigned int reg,
@@ -703,6 +729,20 @@ static int read_msr(
*val = ctxt->addr_size > 32 ? 0x500 /* LME|LMA */ : 0;
return X86EMUL_OKAY;

+#ifdef __x86_64__
+ case 0xc0000101: /* GS_BASE */
+ if ( ctxt->addr_size < 64 )
+ break;
+ *val = gs_base;
+ return X86EMUL_OKAY;
+
+ case 0xc0000102: /* SHADOW_GS_BASE */
+ if ( ctxt->addr_size < 64 )
+ break;
+ *val = gs_base_shadow;
+ return X86EMUL_OKAY;
+#endif
+
case 0xc0000103: /* TSC_AUX */
#define TSC_AUX_VALUE 0xCACACACA
*val = TSC_AUX_VALUE;
@@ -712,6 +752,31 @@ static int read_msr(
return X86EMUL_UNHANDLEABLE;
}

+#ifdef __x86_64__
+static int write_msr(
+ unsigned int reg,
+ uint64_t val,
+ struct x86_emulate_ctxt *ctxt)
+{
+ switch ( reg )
+ {
+ case 0xc0000101: /* GS_BASE */
+ if ( ctxt->addr_size < 64 || !is_canonical_address(val) )
+ break;
+ gs_base = val;
+ return X86EMUL_OKAY;
+
+ case 0xc0000102: /* SHADOW_GS_BASE */
+ if ( ctxt->addr_size < 64 || !is_canonical_address(val) )
+ break;
+ gs_base_shadow = val;
+ return X86EMUL_OKAY;
+ }
+
+ return X86EMUL_UNHANDLEABLE;
+}
+#endif
+
#define INVPCID_ADDR 0x12345678
#define INVPCID_PCID 0x123

@@ -1345,6 +1410,41 @@ int main(int argc, char **argv)
printf("%u bytes read - ", bytes_read);
goto fail;
}
+ printf("okay\n");
+
+ emulops.write_segment = write_segment;
+ emulops.write_msr = write_msr;
+
+ printf("%-40s", "Testing swapgs...");
+ instr[0] = 0x0f; instr[1] = 0x01; instr[2] = 0xf8;
+ regs.eip = (unsigned long)&instr[0];
+ gs_base = 0xffffeeeecccc8888UL;
+ gs_base_shadow = 0x0000111122224444UL;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) ||
+ (gs_base != 0x0000111122224444UL) ||
+ (gs_base_shadow != 0xffffeeeecccc8888UL) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing lkgs 2(%rdx)...");
+ instr[0] = 0xf2; instr[1] = 0x0f; instr[2] = 0x00; instr[3] = 0x72; instr[4] = 0x02;
+ regs.eip = (unsigned long)&instr[0];
+ regs.edx = (unsigned long)res;
+ res[0] = 0x00004444;
+ res[1] = 0x8888cccc;
+ i = cp.extd.nscb; cp.extd.nscb = true; /* for AMD */
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[5]) ||
+ (gs_base != 0x0000111122224444UL) ||
+ gs_base_shadow )
+ goto fail;
+
+ cp.extd.nscb = i;
+ emulops.write_segment = NULL;
+ emulops.write_msr = NULL;
#endif
printf("okay\n");
@@ -86,6 +86,7 @@ bool emul_test_init(void)
cp.feat.adx = true;
cp.feat.avx512pf = cp.feat.avx512f;
cp.feat.rdpid = true;
+ cp.feat.lkgs = true;
cp.feat.wrmsrns = true;

cp.extd.clzero = true;
@@ -741,8 +741,12 @@ decode_twobyte(struct x86_emulate_state
case 0:
s->desc |= DstMem | SrcImplicit | Mov;
break;
+ case 6:
+ if ( !(s->modrm_reg & 1) && mode_64bit() )
+ {
case 2: case 4:
- s->desc |= SrcMem16;
+ s->desc |= SrcMem16;
+ }
break;
}
break;
@@ -589,6 +589,7 @@ amd_like(const struct x86_emulate_ctxt *
#define vcpu_has_avx512_fp16() (ctxt->cpuid->feat.avx512_fp16)
#define vcpu_has_avx_vnni() (ctxt->cpuid->feat.avx_vnni)
#define vcpu_has_avx512_bf16() (ctxt->cpuid->feat.avx512_bf16)
+#define vcpu_has_lkgs() (ctxt->cpuid->feat.lkgs)
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
#define vcpu_has_avx_ifma() (ctxt->cpuid->feat.avx_ifma)
#define vcpu_has_avx_vnni_int8() (ctxt->cpuid->feat.avx_vnni_int8)
@@ -2853,8 +2853,35 @@ x86_emulate(
break;
}
break;
- default:
- generate_exception_if(true, X86_EXC_UD);
+ case 6: /* lkgs */
+ generate_exception_if((modrm_reg & 1) || vex.pfx != vex_f2,
+ X86_EXC_UD);
+ generate_exception_if(!mode_64bit() || !mode_ring0(), X86_EXC_UD);
+ vcpu_must_have(lkgs);
+ fail_if(!ops->read_segment || !ops->read_msr ||
+ !ops->write_segment || !ops->write_msr);
+ if ( (rc = ops->read_msr(MSR_SHADOW_GS_BASE, &msr_val,
+ ctxt)) != X86EMUL_OKAY ||
+ (rc = ops->read_segment(x86_seg_gs, &sreg,
+ ctxt)) != X86EMUL_OKAY )
+ goto done;
+ dst.orig_val = sreg.base; /* Preserve full GS Base. */
+ if ( (rc = protmode_load_seg(x86_seg_gs, src.val, false, &sreg,
+ ctxt, ops)) != X86EMUL_OKAY ||
+ /* Write (32-bit) base into SHADOW_GS. */
+ (rc = ops->write_msr(MSR_SHADOW_GS_BASE, sreg.base,
+ ctxt)) != X86EMUL_OKAY )
+ goto done;
+ sreg.base = dst.orig_val; /* Reinstate full GS Base. */
+ if ( (rc = ops->write_segment(x86_seg_gs, &sreg,
+ ctxt)) != X86EMUL_OKAY )
+ {
+ /* Best effort unwind (i.e. no real error checking). */
+ if ( ops->write_msr(MSR_SHADOW_GS_BASE, msr_val,
+ ctxt) == X86EMUL_EXCEPTION )
+ x86_emul_reset_event(ctxt);
+ goto done;
+ }
break;
}
break;
@@ -282,6 +282,8 @@ XEN_CPUFEATURE(AVX512_BF16, 10*32+ 5) /
XEN_CPUFEATURE(FZRM, 10*32+10) /*A Fast Zero-length REP MOVSB */
XEN_CPUFEATURE(FSRS, 10*32+11) /*A Fast Short REP STOSB */
XEN_CPUFEATURE(FSRCS, 10*32+12) /*A Fast Short REP CMPSB/SCASB */
+XEN_CPUFEATURE(FRED, 10*32+17) /* Flexible Return and Event Delivery */
+XEN_CPUFEATURE(LKGS, 10*32+18) /*S Load Kernel GS Base */
XEN_CPUFEATURE(WRMSRNS, 10*32+19) /*S WRMSR Non-Serialising */
XEN_CPUFEATURE(AVX_IFMA, 10*32+23) /*A AVX-IFMA Instructions */
@@ -274,7 +274,7 @@ def crunch_numbers(state):
# superpages, PCID and PKU are only available in 4 level paging.
# NO_LMSL indicates the absense of Long Mode Segment Limits, which
# have been dropped in hardware.
- LM: [CX16, PCID, LAHF_LM, PAGE1GB, PKU, NO_LMSL],
+ LM: [CX16, PCID, LAHF_LM, PAGE1GB, PKU, NO_LMSL, LKGS],

# AMD K6-2+ and K6-III processors shipped with 3DNow+, beyond the
# standard 3DNow in the earlier K6 processors.
@@ -332,6 +332,9 @@ def crunch_numbers(state):

# The behaviour described by RRSBA depend on eIBRS being active.
EIBRS: [RRSBA],
+
+ # FRED builds on the LKGS instruction.
+ LKGS: [FRED],
}
deep_features = tuple(sorted(deps.keys()))
Provide support for this insn, which is a prereq to FRED. CPUID-wise introduce both its and FRED's bit at this occasion, thus allowing to also express the dependency right away. While adding a testcase, also add a SWAPGS one. In order to not affect the behavior of pre-existing tests, install write_{segment,msr} hooks only transiently. Signed-off-by: Jan Beulich <jbeulich@suse.com> --- Instead of ->read_segment() we could of course also use ->read_msr() to fetch the original GS base. I don't think I can see a clear advantage of either approach; the way it's done it matches how we handle SWAPGS. For PV save_segments() would need adjustment, but the insn being restricted to ring 0 means PV guests can't use it anyway (unless we wanted to emulate it as another privileged insn). --- v3: Add dependency on LM. Re-base. v2: Use X86_EXC_*. Add comments.