--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -1,5 +1,6 @@
 DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
 DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
+DEF_HELPER_FLAGS_3(cc_compute_nz, TCG_CALL_NO_RWG_SE, tl, tl, tl, int)
 
 DEF_HELPER_3(write_eflags, void, env, tl, i32)
 DEF_HELPER_1(read_eflags, tl, env)
--- a/target/i386/tcg/cc_helper.c
+++ b/target/i386/tcg/cc_helper.c
@@ -95,6 +95,19 @@ static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
     return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
 }
 
+target_ulong helper_cc_compute_nz(target_ulong dst, target_ulong src1,
+                                  int op)
+{
+    if (CC_OP_HAS_EFLAGS(op)) {
+        return ~src1 & CC_Z;
+    } else {
+        MemOp size = cc_op_size(op);
+        target_ulong mask = MAKE_64BIT_MASK(0, 8 << size);
+
+        return dst & mask;
+    }
+}
+
 target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
                                    target_ulong src2, int op)
 {
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -1008,15 +1008,19 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
 {
     switch (s->cc_op) {
-    case CC_OP_DYNAMIC:
-        gen_compute_eflags(s);
-        /* FALLTHRU */
     case CC_OP_EFLAGS:
     case CC_OP_ADCX:
     case CC_OP_ADOX:
     case CC_OP_ADCOX:
         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                              .imm = CC_Z };
+    case CC_OP_DYNAMIC:
+        gen_update_cc_op(s);
+        if (!reg) {
+            reg = tcg_temp_new();
+        }
+        gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
+        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
     default:
         {
             MemOp size = cc_op_size(s->cc_op);
Most uses of CC_OP_DYNAMIC are for CMP/JB/JE or similar sequences.  We can
optimize many of them to avoid computation of the flags.  This eliminates
both TCG ops to set up the new cc_op, and helper instructions because
evaluating just ZF is much cheaper.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 target/i386/helper.h        |  1 +
 target/i386/tcg/cc_helper.c | 13 +++++++++++++
 target/i386/tcg/translate.c | 10 +++++++---
 3 files changed, 21 insertions(+), 3 deletions(-)
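
As an aside for readers less familiar with the cc_op machinery, the
standalone sketch below (plain C, not QEMU code) mirrors what the new
helper computes: a value that is zero exactly when ZF would be set,
obtained either by testing an already-materialized flags word or by
masking the destination to the operation's width.  The names compute_nz,
low_mask and EXAMPLE_CC_Z are made up for illustration; only the logic
follows the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CC_Z 0x0040   /* ZF bit position in EFLAGS */

/* Mask of the low (8 << size) bits, like MAKE_64BIT_MASK(0, 8 << size);
 * size mimics MemOp: 0 = 8-bit, 1 = 16-bit, 2 = 32-bit, 3 = 64-bit. */
static uint64_t low_mask(int size)
{
    unsigned bits = 8u << size;
    return bits >= 64 ? ~UINT64_C(0) : (UINT64_C(1) << bits) - 1;
}

/* Returns zero iff ZF would be set.  When the flags already live in src1
 * (the CC_OP_HAS_EFLAGS case), a single bit test is enough; otherwise it
 * suffices to look at the low bits of the destination, with no need to
 * compute CF/PF/AF/SF/OF at all. */
static uint64_t compute_nz(uint64_t dst, uint64_t src1, int size,
                           int flags_in_src1)
{
    if (flags_in_src1) {
        return ~src1 & EXAMPLE_CC_Z;
    }
    return dst & low_mask(size);
}

int main(void)
{
    /* 8-bit result whose low byte is zero -> ZF set -> returns 0. */
    assert(compute_nz(0x100, 0, 0, 0) == 0);
    /* 32-bit nonzero result -> ZF clear -> nonzero return. */
    assert(compute_nz(0x1, 0, 2, 0) != 0);
    /* Flags already materialized with ZF set. */
    assert(compute_nz(0, EXAMPLE_CC_Z, 0, 1) == 0);
    printf("ok\n");
    return 0;
}

This is also why TCG_COND_EQ against zero is the right CCPrepare condition
in the CC_OP_DYNAMIC case above: the helper's result is compared for
equality with 0, which holds exactly when ZF is set.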