Message ID | 20220911222630.338330-2-pbonzini@redhat.com |
---|---|
State | New |
Headers | show |
Series | target/i386: two fixes for cmpxchg | expand |
On 9/11/22 23:26, Paolo Bonzini wrote: > +/* Compute the result of writing t0 to the OT-sized register REG. > + * > + * If DEST is NULL, store the result into the register and return the > + * register's TCGv. > + * > + * If DEST is not NULL, store the result into DEST and return the > + * register's TCGv. > + */ /* * Compute... Why bother passing NULL, and fixing it up at each use... > +static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) > +{ > + gen_op_deposit_reg_v(s, ot, reg, NULL, t0); > } ... when you can just as easily pass in the register here? > > static inline > @@ -5495,26 +5514,36 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) > s->mem_index, ot | MO_LE); > gen_op_mov_reg_v(s, ot, R_EAX, oldv); > } else { > + gen_extu(ot, cmpv); > if (mod == 3) { > + TCGv dest; > rm = (modrm & 7) | REX_B(s); > gen_op_mov_v_reg(s, ot, oldv, rm); > + gen_extu(ot, oldv); > + > + /* > + * Unlike the memory case, where "the destination operand receives > + * a write cycle without regard to the result of the comparison", > + * rm must not be touched altogether if the write fails, including > + * not zero-extending it on 64-bit processors. So, precompute > + * the result of a successful writeback and perform the movcond > + * directly on cpu_regs. Also need to write accumulator first, in > + * case rm is part of RAX too. > + */ > + gen_op_mov_reg_v(s, ot, R_EAX, oldv); > + dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv); > + tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest); > } else { > gen_lea_modrm(env, s, modrm); > gen_op_ld_v(s, ot, oldv, s->A0); > - rm = 0; /* avoid warning */ > - } > - gen_extu(ot, oldv); > - gen_extu(ot, cmpv); > - /* store value = (old == cmp ? 
new : old); */ > - tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); > - if (mod == 3) { > - gen_op_mov_reg_v(s, ot, R_EAX, oldv); > - gen_op_mov_reg_v(s, ot, rm, newv); > - } else { > - /* Perform an unconditional store cycle like physical cpu; > - must be before changing accumulator to ensure > - idempotency if the store faults and the instruction > - is restarted */ > + > + /* > + * Perform an unconditional store cycle like physical cpu; > + * must be before changing accumulator to ensure > + * idempotency if the store faults and the instruction > + * is restarted > + */ > + tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); > gen_op_st_v(s, ot, newv, s->A0); > gen_op_mov_reg_v(s, ot, R_EAX, oldv); > } We have an outstanding bug report that suggests that the move to eax must use the deposit in both cases: https://gitlab.com/qemu-project/qemu/-/issues/508 r~
On 9/12/22 09:55, Richard Henderson wrote: > > * Compute... > > Why bother passing NULL, and fixing it up at each use... > >> +static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv >> t0) >> +{ >> + gen_op_deposit_reg_v(s, ot, reg, NULL, t0); >> } > > ... when you can just as easily pass in the register here? Because dest can be fixed up to either cpu_regs[reg - 4] for high-byte registers, or cpu_regs[reg] for everything else. > We have an outstanding bug report that suggests that the move to eax must use the deposit in both cases: > > https://gitlab.com/qemu-project/qemu/-/issues/508 Ok, so that's two bugs. But both of them can indeed be fixed with gen_op_deposit_reg_v. Paolo
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index d6420df31d..0e9237d627 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -429,32 +429,51 @@ static inline MemOp mo_b_d32(int b, MemOp ot) return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; } -static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) +/* Compute the result of writing t0 to the OT-sized register REG. + * + * If DEST is NULL, store the result into the register and return the + * register's TCGv. + * + * If DEST is not NULL, store the result into DEST and return the + * register's TCGv. + */ +static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0) { switch(ot) { case MO_8: - if (!byte_reg_is_xH(s, reg)) { - tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8); - } else { - tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8); + if (byte_reg_is_xH(s, reg)) { + dest = dest ? dest : cpu_regs[reg - 4]; + tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8); + return cpu_regs[reg - 4]; } + dest = dest ? dest : cpu_regs[reg]; + tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8); break; case MO_16: - tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16); + dest = dest ? dest : cpu_regs[reg]; + tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16); break; case MO_32: /* For x86_64, this sets the higher half of register to zero. For i386, this is equivalent to a mov. */ - tcg_gen_ext32u_tl(cpu_regs[reg], t0); + dest = dest ? dest : cpu_regs[reg]; + tcg_gen_ext32u_tl(dest, t0); break; #ifdef TARGET_X86_64 case MO_64: - tcg_gen_mov_tl(cpu_regs[reg], t0); + dest = dest ? 
dest : cpu_regs[reg]; + tcg_gen_mov_tl(dest, t0); break; #endif default: tcg_abort(); } + return cpu_regs[reg]; +} + +static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) +{ + gen_op_deposit_reg_v(s, ot, reg, NULL, t0); } static inline @@ -5495,26 +5514,36 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->mem_index, ot | MO_LE); gen_op_mov_reg_v(s, ot, R_EAX, oldv); } else { + gen_extu(ot, cmpv); if (mod == 3) { + TCGv dest; rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(s, ot, oldv, rm); + gen_extu(ot, oldv); + + /* + * Unlike the memory case, where "the destination operand receives + * a write cycle without regard to the result of the comparison", + * rm must not be touched altogether if the write fails, including + * not zero-extending it on 64-bit processors. So, precompute + * the result of a successful writeback and perform the movcond + * directly on cpu_regs. Also need to write accumulator first, in + * case rm is part of RAX too. + */ + gen_op_mov_reg_v(s, ot, R_EAX, oldv); + dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv); + tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, oldv, s->A0); - rm = 0; /* avoid warning */ - } - gen_extu(ot, oldv); - gen_extu(ot, cmpv); - /* store value = (old == cmp ? 
new : old); */ - tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); - if (mod == 3) { - gen_op_mov_reg_v(s, ot, R_EAX, oldv); - gen_op_mov_reg_v(s, ot, rm, newv); - } else { - /* Perform an unconditional store cycle like physical cpu; - must be before changing accumulator to ensure - idempotency if the store faults and the instruction - is restarted */ + + /* + * Perform an unconditional store cycle like physical cpu; + * must be before changing accumulator to ensure + * idempotency if the store faults and the instruction + * is restarted + */ + tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); gen_op_st_v(s, ot, newv, s->A0); gen_op_mov_reg_v(s, ot, R_EAX, oldv); }
Unlike the memory case, where "the destination operand receives a write cycle without regard to the result of the comparison", rm must not be touched at all if the write fails, including not zero-extending it on 64-bit processors. This is not how the movcond currently works, because it is always followed by a gen_op_mov_reg_v to rm. To fix it, introduce a new function that is similar to gen_op_mov_reg_v but writes to a TCG temporary. Considering that gen_extu(ot, oldv) is not needed in the memory case either, the two cases for register and memory destinations are different enough that one might as well fuse the two "if (mod == 3)" into one. So do that too. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- target/i386/tcg/translate.c | 73 ++++++++++++++++++++++++++----------- 1 file changed, 51 insertions(+), 22 deletions(-)