From patchwork Wed Apr 7 22:42:26 2010 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Richard Henderson X-Patchwork-Id: 49668 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [199.232.76.165]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (Client did not present a certificate) by ozlabs.org (Postfix) with ESMTPS id 6DE81B7D2B for ; Thu, 8 Apr 2010 09:15:02 +1000 (EST) Received: from localhost ([127.0.0.1]:56466 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1NzeQQ-0004kX-MK for incoming@patchwork.ozlabs.org; Wed, 07 Apr 2010 19:12:46 -0400 Received: from mailman by lists.gnu.org with tmda-scanned (Exim 4.43) id 1Nze3j-0005QO-1X for qemu-devel@nongnu.org; Wed, 07 Apr 2010 18:49:19 -0400 Received: from [140.186.70.92] (port=38005 helo=eggs.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1Nze3e-0005Nd-4i for qemu-devel@nongnu.org; Wed, 07 Apr 2010 18:49:18 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.69) (envelope-from ) id 1Nze3R-0003iK-51 for qemu-devel@nongnu.org; Wed, 07 Apr 2010 18:49:13 -0400 Received: from are.twiddle.net ([75.149.56.221]:44528) by eggs.gnu.org with esmtp (Exim 4.69) (envelope-from ) id 1Nze3Q-0003hG-4G for qemu-devel@nongnu.org; Wed, 07 Apr 2010 18:49:00 -0400 Received: by are.twiddle.net (Postfix, from userid 5000) id 7602DDE1; Wed, 7 Apr 2010 15:48:56 -0700 (PDT) Message-Id: <621ee3ce401119850ca1fc0dba298282234fcfe6.1270680209.git.rth@twiddle.net> In-Reply-To: References: From: Richard Henderson Date: Wed, 7 Apr 2010 15:42:26 -0700 To: qemu-devel@nongnu.org X-detected-operating-system: by eggs.gnu.org: GNU/Linux 2.6 (newer, 2) Cc: aurelien@aurel32.net Subject: [Qemu-devel] [PATCH 12/13] target-alpha: Fix load-locked/store-conditional. 
X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.5 Precedence: list List-Id: qemu-devel.nongnu.org List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Sender: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Use an exception plus start_exclusive to implement the compare-and-swap. This follows the example set by the MIPS and PPC ports. Signed-off-by: Richard Henderson --- linux-user/main.c | 55 +++++++++++++ target-alpha/cpu.h | 6 +- target-alpha/helper.c | 7 +- target-alpha/op_helper.c | 2 + target-alpha/translate.c | 202 ++++++++++++++++++++++++++++----------------- 5 files changed, 193 insertions(+), 79 deletions(-) diff --git a/linux-user/main.c b/linux-user/main.c index 5252881..61c3ed9 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -2348,6 +2348,51 @@ void cpu_loop(CPUM68KState *env) #endif /* TARGET_M68K */ #ifdef TARGET_ALPHA +static void do_store_exclusive(CPUAlphaState *env, int reg, int quad) +{ + target_ulong addr, val, tmp; + target_siginfo_t info; + int ret = 0; + + addr = env->lock_addr; + tmp = env->lock_st_addr; + env->lock_addr = -1; + env->lock_st_addr = 0; + + start_exclusive(); + mmap_lock(); + + if (addr == tmp) { + if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) { + goto do_sigsegv; + } + + if (val == env->lock_value) { + tmp = env->ir[reg]; + if (quad ? 
put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) { + goto do_sigsegv; + } + ret = 1; + } + } + env->ir[reg] = ret; + env->pc += 4; + + mmap_unlock(); + end_exclusive(); + return; + + do_sigsegv: + mmap_unlock(); + end_exclusive(); + + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = addr; + queue_signal(env, TARGET_SIGSEGV, &info); +} + void cpu_loop (CPUState *env) { int trapnr; @@ -2372,6 +2417,7 @@ void cpu_loop (CPUState *env) exit(1); break; case EXCP_ARITH: + env->lock_addr = -1; info.si_signo = TARGET_SIGFPE; info.si_errno = 0; info.si_code = TARGET_FPE_FLTINV; @@ -2383,6 +2429,7 @@ void cpu_loop (CPUState *env) exit(1); break; case EXCP_DFAULT: + env->lock_addr = -1; info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = 0; /* ??? SEGV_MAPERR vs SEGV_ACCERR. */ @@ -2406,6 +2453,7 @@ void cpu_loop (CPUState *env) exit(1); break; case EXCP_UNALIGN: + env->lock_addr = -1; info.si_signo = TARGET_SIGBUS; info.si_errno = 0; info.si_code = TARGET_BUS_ADRALN; @@ -2414,6 +2462,7 @@ void cpu_loop (CPUState *env) break; case EXCP_OPCDEC: do_sigill: + env->lock_addr = -1; info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLOPC; @@ -2424,6 +2473,7 @@ void cpu_loop (CPUState *env) /* No-op. Linux simply re-enables the FPU. */ break; case EXCP_CALL_PAL ... 
(EXCP_CALL_PALP - 1): + env->lock_addr = -1; switch ((trapnr >> 6) | 0x80) { case 0x80: /* BPT */ @@ -2513,11 +2563,16 @@ void cpu_loop (CPUState *env) case EXCP_DEBUG: info.si_signo = gdb_handlesig (env, TARGET_SIGTRAP); if (info.si_signo) { + env->lock_addr = -1; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, &info); } break; + case EXCP_STL_C: + case EXCP_STQ_C: + do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C); + break; default: printf ("Unhandled trap: 0x%x\n", trapnr); cpu_dump_state(env, stderr, fprintf, 0); diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h index 3dd9888..dae23e2 100644 --- a/target-alpha/cpu.h +++ b/target-alpha/cpu.h @@ -355,11 +355,13 @@ struct CPUAlphaState { uint64_t ir[31]; float64 fir[31]; uint64_t pc; - uint64_t lock; uint32_t pcc[2]; uint64_t ipr[IPR_LAST]; uint64_t ps; uint64_t unique; + uint64_t lock_addr; + uint64_t lock_st_addr; + uint64_t lock_value; float_status fp_status; /* The following fields make up the FPCR, but in FP_STATUS format. 
*/ uint8_t fpcr_exc_status; @@ -440,6 +442,8 @@ enum { /* Pseudo exception for console */ EXCP_CONSOLE_DISPATCH = 0x4001, EXCP_CONSOLE_FIXUP = 0x4002, + EXCP_STL_C = 0x4003, + EXCP_STQ_C = 0x4004, }; /* Arithmetic exception */ diff --git a/target-alpha/helper.c b/target-alpha/helper.c index 46335cd..1ed7ea3 100644 --- a/target-alpha/helper.c +++ b/target-alpha/helper.c @@ -556,12 +556,15 @@ void cpu_dump_state (CPUState *env, FILE *f, if ((i % 3) == 2) cpu_fprintf(f, "\n"); } - cpu_fprintf(f, "\n"); + + cpu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n", + env->lock_addr, env->lock_value); + for (i = 0; i < 31; i++) { cpu_fprintf(f, "FIR%02d " TARGET_FMT_lx " ", i, *((uint64_t *)(&env->fir[i]))); if ((i % 3) == 2) cpu_fprintf(f, "\n"); } - cpu_fprintf(f, "\nlock " TARGET_FMT_lx "\n", env->lock); + cpu_fprintf(f, "\n"); } diff --git a/target-alpha/op_helper.c b/target-alpha/op_helper.c index a209130..bfc095c 100644 --- a/target-alpha/op_helper.c +++ b/target-alpha/op_helper.c @@ -1159,6 +1159,7 @@ void helper_hw_rei (void) env->pc = env->ipr[IPR_EXC_ADDR] & ~3; env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1; env->intr_flag = 0; + env->lock_addr = -1; /* XXX: re-enable interrupts and memory mapping */ } @@ -1167,6 +1168,7 @@ void helper_hw_ret (uint64_t a) env->pc = a & ~3; env->ipr[IPR_EXC_ADDR] = a & 1; env->intr_flag = 0; + env->lock_addr = -1; /* XXX: re-enable interrupts and memory mapping */ } diff --git a/target-alpha/translate.c b/target-alpha/translate.c index 5636f60..5a440bc 100644 --- a/target-alpha/translate.c +++ b/target-alpha/translate.c @@ -86,7 +86,9 @@ static TCGv_ptr cpu_env; static TCGv cpu_ir[31]; static TCGv cpu_fir[31]; static TCGv cpu_pc; -static TCGv cpu_lock; +static TCGv cpu_lock_addr; +static TCGv cpu_lock_st_addr; +static TCGv cpu_lock_value; #ifdef CONFIG_USER_ONLY static TCGv cpu_uniq; #endif @@ -123,8 +125,15 @@ static void alpha_translate_init(void) cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, 
pc), "pc"); - cpu_lock = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, lock), "lock"); + cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, lock_addr), + "lock_addr"); + cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, lock_st_addr), + "lock_st_addr"); + cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUState, lock_value), + "lock_value"); #ifdef CONFIG_USER_ONLY cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0, @@ -189,14 +198,16 @@ static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags) static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags) { - tcg_gen_mov_i64(cpu_lock, t1); tcg_gen_qemu_ld32s(t0, t1, flags); + tcg_gen_mov_i64(cpu_lock_addr, t1); + tcg_gen_mov_i64(cpu_lock_value, t0); } static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags) { - tcg_gen_mov_i64(cpu_lock, t1); tcg_gen_qemu_ld64(t0, t1, flags); + tcg_gen_mov_i64(cpu_lock_addr, t1); + tcg_gen_mov_i64(cpu_lock_value, t0); } static inline void gen_load_mem(DisasContext *ctx, @@ -205,25 +216,31 @@ static inline void gen_load_mem(DisasContext *ctx, int ra, int rb, int32_t disp16, int fp, int clear) { - TCGv addr; + TCGv addr, va; - if (unlikely(ra == 31)) + /* LDQ_U with ra $31 is UNOP. Other various loads are forms of + prefetches, which we can treat as nops. No worries about + missed exceptions here. */ + if (unlikely(ra == 31)) { return; + } addr = tcg_temp_new(); if (rb != 31) { tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - if (clear) + if (clear) { tcg_gen_andi_i64(addr, addr, ~0x7); + } } else { - if (clear) + if (clear) { disp16 &= ~0x7; + } tcg_gen_movi_i64(addr, disp16); } - if (fp) - tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx); - else - tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx); + + va = (fp ? 
cpu_fir[ra] : cpu_ir[ra]); + tcg_gen_qemu_load(va, addr, ctx->mem_idx); + tcg_temp_free(addr); } @@ -257,73 +274,105 @@ static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags) tcg_temp_free_i32(tmp32); } -static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags) -{ - int l1, l2; - - l1 = gen_new_label(); - l2 = gen_new_label(); - tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1); - tcg_gen_qemu_st32(t0, t1, flags); - tcg_gen_movi_i64(t0, 1); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_movi_i64(t0, 0); - gen_set_label(l2); - tcg_gen_movi_i64(cpu_lock, -1); -} - -static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags) -{ - int l1, l2; - - l1 = gen_new_label(); - l2 = gen_new_label(); - tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1); - tcg_gen_qemu_st64(t0, t1, flags); - tcg_gen_movi_i64(t0, 1); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_movi_i64(t0, 0); - gen_set_label(l2); - tcg_gen_movi_i64(cpu_lock, -1); -} - static inline void gen_store_mem(DisasContext *ctx, void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, int flags), int ra, int rb, int32_t disp16, int fp, - int clear, int local) + int clear) { - TCGv addr; - if (local) - addr = tcg_temp_local_new(); - else - addr = tcg_temp_new(); + TCGv addr, va; + + addr = tcg_temp_new(); if (rb != 31) { tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - if (clear) + if (clear) { tcg_gen_andi_i64(addr, addr, ~0x7); + } } else { - if (clear) + if (clear) { disp16 &= ~0x7; + } tcg_gen_movi_i64(addr, disp16); } - if (ra != 31) { - if (fp) - tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx); - else - tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx); + + if (ra == 31) { + va = tcg_const_i64(0); } else { - TCGv zero; - if (local) - zero = tcg_const_local_i64(0); - else - zero = tcg_const_i64(0); - tcg_gen_qemu_store(zero, addr, ctx->mem_idx); - tcg_temp_free(zero); + va = (fp ? 
cpu_fir[ra] : cpu_ir[ra]); } + tcg_gen_qemu_store(va, addr, ctx->mem_idx); + tcg_temp_free(addr); + if (ra == 31) { + tcg_temp_free(va); + } +} + +static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, + int32_t disp16, int quad) +{ + TCGv addr; + + if (ra == 31) { + /* ??? Don't bother storing anything. The user can't tell + the difference, since the zero register always reads zero. */ + return NO_EXIT; + } + +#if defined(CONFIG_USER_ONLY) + addr = cpu_lock_st_addr; +#else + addr = tcg_temp_local_new(); +#endif + + if (rb != 31) { + tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); + } else { + tcg_gen_movi_i64(addr, disp16); + } + +#if defined(CONFIG_USER_ONLY) + /* ??? This is handled via a complicated version of compare-and-swap + in the cpu_loop. Hopefully one day we'll have a real CAS opcode + in TCG so that this isn't necessary. */ + return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra); +#else + /* ??? In system mode we are never multi-threaded, so CAS can be + implemented via a non-atomic load-compare-store sequence. 
*/ + { + int lab_fail, lab_done; + TCGv val; + + lab_fail = gen_new_label(); + lab_done = gen_new_label(); + tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); + + val = tcg_temp_new(); + if (quad) { + tcg_gen_qemu_ld64(val, addr, ctx->mem_idx); + } else { + tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx); + } + tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail); + + if (quad) { + tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx); + } else { + tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx); + } + tcg_gen_movi_i64(cpu_ir[ra], 1); + tcg_gen_br(lab_done); + + gen_set_label(lab_fail); + tcg_gen_movi_i64(cpu_ir[ra], 0); + + gen_set_label(lab_done); + tcg_gen_movi_i64(cpu_lock_addr, -1); + + tcg_temp_free(addr); + return NO_EXIT; + } +#endif } static int use_goto_tb(DisasContext *ctx, uint64_t dest) @@ -1534,15 +1583,15 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x0D: /* STW */ - gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0); + gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); break; case 0x0E: /* STB */ - gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0); + gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0); break; case 0x0F: /* STQ_U */ - gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0); + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1); break; case 0x10: switch (fn7) { @@ -2975,19 +3024,19 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x24: /* STF */ - gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0); + gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0); break; case 0x25: /* STG */ - gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0); + gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0); break; case 0x26: /* STS */ - gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0); + gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0); break; case 0x27: /* STT */ - 
gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0); + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0); break; case 0x28: /* LDL */ @@ -3007,19 +3056,19 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x2C: /* STL */ - gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0); + gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0); break; case 0x2D: /* STQ */ - gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0); + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0); break; case 0x2E: /* STL_C */ - gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1); + ret = gen_store_conditional(ctx, ra, rb, disp16, 0); break; case 0x2F: /* STQ_C */ - gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1); + ret = gen_store_conditional(ctx, ra, rb, disp16, 1); break; case 0x30: /* BR */ @@ -3285,6 +3334,7 @@ CPUAlphaState * cpu_alpha_init (const char *cpu_model) #else pal_init(env); #endif + env->lock_addr = -1; /* Initialize IPR */ #if defined (CONFIG_USER_ONLY)