@@ -326,6 +326,7 @@ static const struct {
{ { 0x00, 0x18 }, { 2, 2 }, T, R }, /* ltr */
{ { 0x00, 0x20 }, { 2, 2 }, T, R }, /* verr */
{ { 0x00, 0x28 }, { 2, 2 }, T, R }, /* verw */
+ { { 0x00, 0x30 }, { 0, 2 }, T, R, pfx_f2 }, /* lkgs */
{ { 0x01, 0x00 }, { 2, 2 }, F, W }, /* sgdt */
{ { 0x01, 0x08 }, { 2, 2 }, F, W }, /* sidt */
{ { 0x01, 0x10 }, { 2, 2 }, F, R }, /* lgdt */
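
For reference, the new row wires up the F2-prefixed 0F 00 /6 encoding exercised by the test further down; the { 0, 2 } pair presumably marks the insn as unavailable in 32-bit mode with a 16-bit memory operand in 64-bit mode (that reading of the columns is an assumption). A quick standalone sketch of how the test's five bytes decode (field names are illustrative, not the emulator's):

    #include <stdio.h>

    int main(void)
    {
        /* lkgs 2(%rdx) as emitted by the test harness: F2 0F 00 72 02. */
        static const unsigned char insn[] = { 0xf2, 0x0f, 0x00, 0x72, 0x02 };
        unsigned int modrm = insn[3];

        printf("mod=%u reg=%u rm=%u disp8=%u\n",
               modrm >> 6,       /* 1 -> disp8 follows  */
               (modrm >> 3) & 7, /* 6 -> /6, i.e. lkgs  */
               modrm & 7,        /* 2 -> (%rdx)         */
               insn[4]);         /* 2 -> displacement   */
        return 0;
    }
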
@@ -672,6 +672,10 @@ static int blk(
return x86_emul_blk((void *)offset, p_data, bytes, eflags, state, ctxt);
}
+#ifdef __x86_64__
+static unsigned long gs_base, gs_base_shadow;
+#endif
+
static int read_segment(
enum x86_segment seg,
struct segment_register *reg,
@@ -681,8 +685,30 @@ static int read_segment(
return X86EMUL_UNHANDLEABLE;
memset(reg, 0, sizeof(*reg));
reg->p = 1;
+
+#ifdef __x86_64__
+ if ( seg == x86_seg_gs )
+ reg->base = gs_base;
+#endif
+
+ return X86EMUL_OKAY;
+}
+
+#ifdef __x86_64__
+static int write_segment(
+ enum x86_segment seg,
+ const struct segment_register *reg,
+ struct x86_emulate_ctxt *ctxt)
+{
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+
+ if ( seg == x86_seg_gs )
+ gs_base = reg->base;
+
return X86EMUL_OKAY;
}
+#endif

static int read_msr(
unsigned int reg,
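
The hook pair above gives the otherwise stateless harness just enough segment state for these tests: reads hand back the tracked GS base, writes capture it, and everything else stays zeroed or unhandled. A stripped-down model of that round trip (simplified types, not the harness's struct segment_register):

    #include <assert.h>

    /* Only the GS base is modelled, mirroring the gs_base variable above. */
    static unsigned long model_gs_base;

    struct seg { unsigned long base; };

    static void model_read_gs(struct seg *reg)        { reg->base = model_gs_base; }
    static void model_write_gs(const struct seg *reg) { model_gs_base = reg->base; }

    int main(void)
    {
        struct seg s = { .base = 0xffffeeeecccc8888UL };

        model_write_gs(&s);  /* as the swapgs/lkgs tests install state */
        s.base = 0;
        model_read_gs(&s);
        assert(s.base == 0xffffeeeecccc8888UL);
        return 0;
    }
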
@@ -695,6 +721,20 @@ static int read_msr(
*val = ctxt->addr_size > 32 ? EFER_LME | EFER_LMA : 0;
return X86EMUL_OKAY;

+#ifdef __x86_64__
+ case MSR_GS_BASE:
+ if ( ctxt->addr_size < 64 )
+ break;
+ *val = gs_base;
+ return X86EMUL_OKAY;
+
+ case MSR_SHADOW_GS_BASE:
+ if ( ctxt->addr_size < 64 )
+ break;
+ *val = gs_base_shadow;
+ return X86EMUL_OKAY;
+#endif
+
case MSR_TSC_AUX:
#define TSC_AUX_VALUE 0xCACACACA
*val = TSC_AUX_VALUE;
@@ -704,6 +744,31 @@ static int read_msr(
return X86EMUL_UNHANDLEABLE;
}

+#ifdef __x86_64__
+static int write_msr(
+ unsigned int reg,
+ uint64_t val,
+ struct x86_emulate_ctxt *ctxt)
+{
+ switch ( reg )
+ {
+ case MSR_GS_BASE:
+ if ( ctxt->addr_size < 64 || !is_canonical_address(val) )
+ break;
+ gs_base = val;
+ return X86EMUL_OKAY;
+
+ case MSR_SHADOW_GS_BASE:
+ if ( ctxt->addr_size < 64 || !is_canonical_address(val) )
+ break;
+ gs_base_shadow = val;
+ return X86EMUL_OKAY;
+ }
+
+ return X86EMUL_UNHANDLEABLE;
+}
+#endif
+
#define INVPCID_ADDR 0x12345678
#define INVPCID_PCID 0x123
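
Both MSR writes refuse non-canonical values via is_canonical_address(). As a reference point, the classic 48-bit form of that predicate is a sign-extension check (an assumed shape — the real helper may also account for LA57's 57-bit addresses):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* 48-bit canonical check: bits 63:47 must sign-extend bit 47. */
    static bool is_canonical48(uint64_t addr)
    {
        return (int64_t)(addr << 16) >> 16 == (int64_t)addr;
    }

    int main(void)
    {
        assert(is_canonical48(0xffffeeeecccc8888UL));  /* high-half address */
        assert(is_canonical48(0x0000111122224444UL));  /* low-half address  */
        assert(!is_canonical48(0x8000000000000000UL)); /* hole address      */
        return 0;
    }
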
@@ -1338,6 +1403,41 @@ int main(int argc, char **argv)
printf("%u bytes read - ", bytes_read);
goto fail;
}
+ printf("okay\n");
+
+ emulops.write_segment = write_segment;
+ emulops.write_msr = write_msr;
+
+ printf("%-40s", "Testing swapgs...");
+ instr[0] = 0x0f; instr[1] = 0x01; instr[2] = 0xf8;
+ regs.eip = (unsigned long)&instr[0];
+ gs_base = 0xffffeeeecccc8888UL;
+ gs_base_shadow = 0x0000111122224444UL;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) ||
+ (gs_base != 0x0000111122224444UL) ||
+ (gs_base_shadow != 0xffffeeeecccc8888UL) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing lkgs 2(%rdx)...");
+ instr[0] = 0xf2; instr[1] = 0x0f; instr[2] = 0x00; instr[3] = 0x72; instr[4] = 0x02;
+ regs.eip = (unsigned long)&instr[0];
+ regs.edx = (unsigned long)res;
+ res[0] = 0x00004444;
+ res[1] = 0x8888cccc;
+ i = cpu_policy.extd.nscb; cpu_policy.extd.nscb = true; /* for AMD */
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[5]) ||
+ (gs_base != 0x0000111122224444UL) ||
+ gs_base_shadow )
+ goto fail;
+
+ cpu_policy.extd.nscb = i;
+ emulops.write_segment = NULL;
+ emulops.write_msr = NULL;
#endif
printf("okay\n");
@@ -85,6 +85,7 @@ bool emul_test_init(void)
cpu_policy.feat.invpcid = true;
cpu_policy.feat.adx = true;
cpu_policy.feat.rdpid = true;
+ cpu_policy.feat.lkgs = true;
cpu_policy.feat.wrmsrns = true;

cpu_policy.extd.clzero = true;
@@ -744,8 +744,12 @@ decode_twobyte(struct x86_emulate_state
case 0:
s->desc |= DstMem | SrcImplicit | Mov;
break;
+ case 6:
+ if ( !(s->modrm_reg & 1) && mode_64bit() )
+ {
case 2: case 4:
- s->desc |= SrcMem16;
+     s->desc |= SrcMem16;
+ }
break;
}
break;
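
The decode change leans on a C property worth spelling out: case labels may sit inside nested blocks of a switch (the Duff's-device idiom). Entry via case 2/4 jumps straight to the shared assignment, while entry via case 6 reaches it only when the if() condition holds. A standalone demonstration of the construct (names invented for illustration):

    #include <stdio.h>

    static const char *classify(unsigned int reg, int sixty_four)
    {
        const char *desc = "none";

        switch ( reg )
        {
        case 6:
            if ( sixty_four )
            {
        case 2: case 4:
                /* Reached directly for reg 2/4, conditionally for reg 6. */
                desc = "mem16 operand";
            }
            break;
        }
        return desc;
    }

    int main(void)
    {
        printf("reg2: %s\n", classify(2, 0));        /* mem16 operand */
        printf("reg6/32-bit: %s\n", classify(6, 0)); /* none          */
        printf("reg6/64-bit: %s\n", classify(6, 1)); /* mem16 operand */
        return 0;
    }
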
@@ -594,6 +594,7 @@ amd_like(const struct x86_emulate_ctxt *
#define vcpu_has_avx_vnni() (ctxt->cpuid->feat.avx_vnni)
#define vcpu_has_avx512_bf16() (ctxt->cpuid->feat.avx512_bf16)
#define vcpu_has_cmpccxadd() (ctxt->cpuid->feat.cmpccxadd)
+#define vcpu_has_lkgs() (ctxt->cpuid->feat.lkgs)
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
#define vcpu_has_avx_ifma() (ctxt->cpuid->feat.avx_ifma)
#define vcpu_has_avx_vnni_int8() (ctxt->cpuid->feat.avx_vnni_int8)
@@ -2873,8 +2873,39 @@ x86_emulate(
break;
}
break;
- default:
- generate_exception_if(true, X86_EXC_UD);
+ case 6: /* lkgs */
+ generate_exception_if((modrm_reg & 1) || vex.pfx != vex_f2,
+ X86_EXC_UD);
+ generate_exception_if(!mode_64bit() || !mode_ring0(), X86_EXC_UD);
+ vcpu_must_have(lkgs);
+ fail_if(!ops->read_segment || !ops->read_msr ||
+ !ops->write_segment || !ops->write_msr);
+ if ( (rc = ops->read_msr(MSR_SHADOW_GS_BASE, &msr_val,
+ ctxt)) != X86EMUL_OKAY ||
+ (rc = ops->read_segment(x86_seg_gs, &sreg,
+ ctxt)) != X86EMUL_OKAY )
+ goto done;
+ dst.orig_val = sreg.base; /* Preserve full GS Base. */
+ if ( (rc = protmode_load_seg(x86_seg_gs, src.val, false, &sreg,
+ ctxt, ops)) != X86EMUL_OKAY ||
+ /* Write (32-bit) base into SHADOW_GS. */
+ (rc = ops->write_msr(MSR_SHADOW_GS_BASE, sreg.base,
+ ctxt)) != X86EMUL_OKAY )
+ goto done;
+ sreg.base = dst.orig_val; /* Reinstate full GS Base. */
+ if ( (rc = ops->write_segment(x86_seg_gs, &sreg,
+ ctxt)) != X86EMUL_OKAY )
+ {
+ /* Best effort unwind (i.e. no real error checking). */
+ if ( ops->write_msr(MSR_SHADOW_GS_BASE, msr_val,
+ ctxt) == X86EMUL_EXCEPTION )
+ x86_emul_reset_event(ctxt);
+ goto done;
+ }
+ break;
+
+ default:
+ generate_exception_if(true, X86_EXC_UD);
break;
}
break;
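
In short, the sequence reads the shadow MSR and current GS, loads the new descriptor into a scratch segment register, writes that base into MSR_SHADOW_GS_BASE, then reinstates the original GS base so only the shadow side changes; a failed segment write triggers a best-effort MSR rollback. The net state change, as a much-simplified model (null-selector lookup assumed, faults ignored):

    #include <assert.h>

    static unsigned long gs_base        = 0x0000111122224444UL;
    static unsigned long gs_base_shadow = 0xffffeeeecccc8888UL;

    static unsigned long descriptor_base(unsigned int sel)
    {
        /* Real code consults the GDT/LDT; the test only loads a null
         * selector, for which (with NSCB behaviour) the base reads as 0. */
        (void)sel;
        return 0;
    }

    /* Net effect of lkgs: only the shadow GS base changes. */
    static void model_lkgs(unsigned int sel)
    {
        gs_base_shadow = descriptor_base(sel);
    }

    int main(void)
    {
        model_lkgs(0);
        assert(gs_base == 0x0000111122224444UL); /* GS base preserved */
        assert(!gs_base_shadow);                 /* shadow cleared    */
        return 0;
    }
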
@@ -307,7 +307,10 @@ XEN_CPUFEATURE(CMPCCXADD, 10*32+ 7) /
XEN_CPUFEATURE(FZRM, 10*32+10) /*A Fast Zero-length REP MOVSB */
XEN_CPUFEATURE(FSRS, 10*32+11) /*A Fast Short REP STOSB */
XEN_CPUFEATURE(FSRCS, 10*32+12) /*A Fast Short REP CMPSB/SCASB */
+XEN_CPUFEATURE(FRED, 10*32+17) /* Flexible Return and Event Delivery */
+XEN_CPUFEATURE(LKGS, 10*32+18) /*s Load Kernel GS Base */
XEN_CPUFEATURE(WRMSRNS, 10*32+19) /*S WRMSR Non-Serialising */
+XEN_CPUFEATURE(NMI_SRC, 10*32+20) /* NMI-source reporting */
XEN_CPUFEATURE(AMX_FP16, 10*32+21) /* AMX FP16 instruction */
XEN_CPUFEATURE(AVX_IFMA, 10*32+23) /*A AVX-IFMA Instructions */
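
These bits all sit in CPUID leaf 7, sub-leaf 1, EAX (feature word 10 here): FRED is bit 17, LKGS bit 18, WRMSRNS bit 19, NMI_SRC bit 20. A user-space probe, assuming a GCC/Clang environment providing <cpuid.h>'s __get_cpuid_count():

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        if ( !__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx) )
            return 1;

        printf("FRED:    %u\n", (eax >> 17) & 1);
        printf("LKGS:    %u\n", (eax >> 18) & 1);
        printf("WRMSRNS: %u\n", (eax >> 19) & 1);
        printf("NMI_SRC: %u\n", (eax >> 20) & 1);
        return 0;
    }
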
@@ -274,7 +274,8 @@ def crunch_numbers(state):
# superpages, PCID and PKU are only available in 4 level paging.
# NO_LMSL indicates the absence of Long Mode Segment Limits, which
# have been dropped in hardware.
- LM: [CX16, PCID, LAHF_LM, PAGE1GB, PKU, NO_LMSL, AMX_TILE, CMPCCXADD],
+ LM: [CX16, PCID, LAHF_LM, PAGE1GB, PKU, NO_LMSL, AMX_TILE, CMPCCXADD,
+ LKGS],

# AMD K6-2+ and K6-III processors shipped with 3DNow+, beyond the
# standard 3DNow in the earlier K6 processors.
@@ -343,6 +344,9 @@ def crunch_numbers(state):
# computational instructions. All further AMX features are built on top
# of AMX-TILE.
AMX_TILE: [AMX_BF16, AMX_INT8, AMX_FP16, AMX_COMPLEX],
+
+ # FRED builds on the LKGS instruction.
+ LKGS: [FRED],
}

deep_features = tuple(sorted(deps.keys()))
Provide support for this insn, which is a prereq to FRED. CPUID-wise introduce both its, FRED's, and the NMI_SRC bit at this occasion, thus allowing to also express the dependency right away. While adding a testcase, also add a SWAPGS one. In order to not affect the behavior of pre-existing tests, install write_{segment,msr} hooks only transiently. Signed-off-by: Jan Beulich <jbeulich@suse.com> --- Instead of ->read_segment() we could of course also use ->read_msr() to fetch the original GS base. I don't think I can see a clear advantage of either approach; the way it's done it matches how we handle SWAPGS. For PV save_segments() would need adjustment, but the insn being restricted to ring 0 means PV guests can't use it anyway (unless we wanted to emulate it as another privileged insn). --- v6: Use MSR constants in test harness. S->s in cpufeatureset.h. Add NMI_SRC feature bits. Re-base. v5: Re-base. v3: Add dependency on LM. Re-base. v2: Use X86_EXC_*. Add comments.