
[1/8] tcg: Clean up direct block chaining data fields

Message ID 1458815961-31979-2-git-send-email-sergey.fedorov@linaro.org
State New

Commit Message

sergey.fedorov@linaro.org March 24, 2016, 10:39 a.m. UTC
From: Sergey Fedorov <serge.fdrv@gmail.com>

Briefly describe in a comment how direct block chaining is done. It
should help in understanding the following data fields.

Rename some fields in TranslationBlock and TCGContext structures to
better reflect their purpose (dropping excessive 'tb_' prefix in
TranslationBlock but keeping it in TCGContext):
   tb_next_offset  =>  jmp_reset_offset
   tb_jmp_offset   =>  jmp_insn_offset
   tb_next         =>  jmp_target_addr
   jmp_next        =>  jmp_list_next
   jmp_first       =>  jmp_list_first

Avoid using a magic constant as an invalid offset which is used to
indicate that there's no n-th jump generated.
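
For example, with this change the "no jump generated" check in
tb_link_page() reads:

    /* init original jump addresses */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }

rather than comparing against a bare 0xffff.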

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
---
 include/exec/exec-all.h      | 44 ++++++++++++++++++++++++--------------
 tcg/aarch64/tcg-target.inc.c |  7 +++---
 tcg/arm/tcg-target.inc.c     |  8 +++----
 tcg/i386/tcg-target.inc.c    |  8 +++----
 tcg/ia64/tcg-target.inc.c    |  6 +++---
 tcg/mips/tcg-target.inc.c    |  8 +++----
 tcg/ppc/tcg-target.inc.c     |  6 +++---
 tcg/s390/tcg-target.inc.c    | 11 +++++-----
 tcg/sparc/tcg-target.inc.c   |  9 ++++----
 tcg/tcg.h                    |  6 +++---
 tcg/tci/tcg-target.inc.c     | 10 ++++-----
 translate-all.c              | 51 +++++++++++++++++++++++---------------------
 12 files changed, 96 insertions(+), 78 deletions(-)

Comments

Alex Bennée March 24, 2016, 1:42 p.m. UTC | #1
sergey.fedorov@linaro.org writes:

> From: Sergey Fedorov <serge.fdrv@gmail.com>
>
> Briefly describe in a comment how direct block chaining is done. It
> should help in understanding of the following data fields.
>
> Rename some fields in TranslationBlock and TCGContext structures to
> better reflect their purpose (dropping excessive 'tb_' prefix in
> TranslationBlock but keeping it in TCGContext):
>    tb_next_offset  =>  jmp_reset_offset
>    tb_jmp_offset   =>  jmp_insn_offset
>    tb_next         =>  jmp_target_addr
>    jmp_next        =>  jmp_list_next
>    jmp_first       =>  jmp_list_first
>
> Avoid using a magic constant as an invalid offset which is used to
> indicate that there's no n-th jump generated.
>
> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
> ---
>  include/exec/exec-all.h      | 44 ++++++++++++++++++++++++--------------
>  tcg/aarch64/tcg-target.inc.c |  7 +++---
>  tcg/arm/tcg-target.inc.c     |  8 +++----
>  tcg/i386/tcg-target.inc.c    |  8 +++----
>  tcg/ia64/tcg-target.inc.c    |  6 +++---
>  tcg/mips/tcg-target.inc.c    |  8 +++----
>  tcg/ppc/tcg-target.inc.c     |  6 +++---
>  tcg/s390/tcg-target.inc.c    | 11 +++++-----
>  tcg/sparc/tcg-target.inc.c   |  9 ++++----
>  tcg/tcg.h                    |  6 +++---
>  tcg/tci/tcg-target.inc.c     | 10 ++++-----
>  translate-all.c              | 51 +++++++++++++++++++++++---------------------
>  12 files changed, 96 insertions(+), 78 deletions(-)
>
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index 05a151da4a54..cc3d2ca25917 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -257,20 +257,32 @@ struct TranslationBlock {
>      struct TranslationBlock *page_next[2];
>      tb_page_addr_t page_addr[2];
>
> -    /* the following data are used to directly call another TB from
> -       the code of this one. */
> -    uint16_t tb_next_offset[2]; /* offset of original jump target */
> +    /* The following data are used to directly call another TB from
> +     * the code of this one. This can be done either by emitting direct or
> +     * indirect native jump instructions. These jumps are reset so that the TB
> +     * just continue its execution. The TB can be linked to another one by
> +     * setting one of the jump targets (or patching the jump instruction). Only
> +     * two of such jumps are supported.
> +     */
> +    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
> +#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
>  #ifdef USE_DIRECT_JUMP
> -    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
> +    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
>  #else
> -    uintptr_t tb_next[2]; /* address of jump generated code */
> +    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
>  #endif
> -    /* list of TBs jumping to this one. This is a circular list using
> -       the two least significant bits of the pointers to tell what is
> -       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
> -       jmp_first */
> -    struct TranslationBlock *jmp_next[2];
> -    struct TranslationBlock *jmp_first;
> +    /* Each TB has an assosiated circular list of TBs jumping to this one.
> +     * jmp_list_first points to the first TB jumping to this one.
> +     * jmp_list_next is used to point to the next TB in a list.
> +     * Since each TB can have two jumps, it can participate in two lists.
> +     * The two least significant bits of a pointer are used to choose which
> +     * data field holds a pointer to the next TB:
> +     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
> +     * In other words, 0/1 tells which jump is used in the pointed TB,
> +     * and 2 means that this is a pointer back to the target TB of this list.
> +     */
> +    struct TranslationBlock *jmp_list_next[2];
> +    struct TranslationBlock *jmp_list_first;

OK I found that tricky to follow. Where does the value of the pointer
come from that sets these bottom bits? The TB jumping to this TB sets it?

>  };
>
>  #include "qemu/thread.h"
> @@ -359,7 +371,7 @@ void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
>  static inline void tb_set_jmp_target(TranslationBlock *tb,
>                                       int n, uintptr_t addr)
>  {
> -    uint16_t offset = tb->tb_jmp_offset[n];
> +    uint16_t offset = tb->jmp_insn_offset[n];
>      tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
>  }
>
> @@ -369,7 +381,7 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
>  static inline void tb_set_jmp_target(TranslationBlock *tb,
>                                       int n, uintptr_t addr)
>  {
> -    tb->tb_next[n] = addr;
> +    tb->jmp_target_addr[n] = addr;
>  }
>
>  #endif
> @@ -378,13 +390,13 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
>                                 TranslationBlock *tb_next)
>  {
>      /* NOTE: this test is only needed for thread safety */
> -    if (!tb->jmp_next[n]) {
> +    if (!tb->jmp_list_next[n]) {
>          /* patch the native jump address */
>          tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
>
>          /* add in TB jmp circular list */
> -        tb->jmp_next[n] = tb_next->jmp_first;
> -        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
> +        tb->jmp_list_next[n] = tb_next->jmp_list_first;
> +        tb_next->jmp_list_first = (TranslationBlock *)((uintptr_t)tb | n);
>      }
>  }
>
> diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
> index 0ed10a974121..08efdf41da48 100644
> --- a/tcg/aarch64/tcg-target.inc.c
> +++ b/tcg/aarch64/tcg-target.inc.c
> @@ -1294,12 +1294,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>  #ifndef USE_DIRECT_JUMP
>  #error "USE_DIRECT_JUMP required for aarch64"
>  #endif
> -        assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */
> -        s->tb_jmp_offset[a0] = tcg_current_code_size(s);
> +        /* consistency for USE_DIRECT_JUMP */
> +        assert(s->tb_jmp_insn_offset != NULL);
> +        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
>          /* actual branch destination will be patched by
>             aarch64_tb_set_jmp_target later, beware retranslation. */
>          tcg_out_goto_noaddr(s);
> -        s->tb_next_offset[a0] = tcg_current_code_size(s);
> +        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
>          break;
>
>      case INDEX_op_br:
> diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
> index 3edf6a6f971c..a9147620b073 100644
> --- a/tcg/arm/tcg-target.inc.c
> +++ b/tcg/arm/tcg-target.inc.c
> @@ -1647,17 +1647,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          tcg_out_goto(s, COND_AL, tb_ret_addr);
>          break;
>      case INDEX_op_goto_tb:
> -        if (s->tb_jmp_offset) {
> +        if (s->tb_jmp_insn_offset) {
>              /* Direct jump method */
> -            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
> +            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
>              tcg_out_b_noaddr(s, COND_AL);
>          } else {
>              /* Indirect jump method */
> -            intptr_t ptr = (intptr_t)(s->tb_next + args[0]);
> +            intptr_t ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
>              tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff);
>              tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff);
>          }
> -        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
> +        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
>          break;
>      case INDEX_op_br:
>          tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
> diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
> index 9187d34caf6d..2f98cae97f3b 100644
> --- a/tcg/i386/tcg-target.inc.c
> +++ b/tcg/i386/tcg-target.inc.c
> @@ -1775,17 +1775,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          tcg_out_jmp(s, tb_ret_addr);
>          break;
>      case INDEX_op_goto_tb:
> -        if (s->tb_jmp_offset) {
> +        if (s->tb_jmp_insn_offset) {
>              /* direct jump method */
>              tcg_out8(s, OPC_JMP_long); /* jmp im */
> -            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
> +            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
>              tcg_out32(s, 0);
>          } else {
>              /* indirect jump method */
>              tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
> -                                 (intptr_t)(s->tb_next + args[0]));
> +                                 (intptr_t)(s->tb_jmp_target_addr + args[0]));
>          }
> -        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
> +        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
>          break;
>      case INDEX_op_br:
>          tcg_out_jxx(s, JCC_JMP, arg_label(args[0]), 0);
> diff --git a/tcg/ia64/tcg-target.inc.c b/tcg/ia64/tcg-target.inc.c
> index 62d654943c20..261861f90c3a 100644
> --- a/tcg/ia64/tcg-target.inc.c
> +++ b/tcg/ia64/tcg-target.inc.c
> @@ -881,13 +881,13 @@ static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
>
>  static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
>  {
> -    if (s->tb_jmp_offset) {
> +    if (s->tb_jmp_insn_offset) {
>          /* direct jump method */
>          tcg_abort();
>      } else {
>          /* indirect jump method */
>          tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2,
> -                     (tcg_target_long)(s->tb_next + arg));
> +                     (tcg_target_long)(s->tb_jmp_target_addr + arg));
>          tcg_out_bundle(s, MmI,
>                         tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
>                                     TCG_REG_R2, TCG_REG_R2),
> @@ -900,7 +900,7 @@ static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
>                         tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
>                                     TCG_REG_B6));
>      }
> -    s->tb_next_offset[arg] = tcg_current_code_size(s);
> +    s->tb_jmp_reset_offset[arg] = tcg_current_code_size(s);
>  }
>
>  static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)
> diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
> index 297bd00910b7..b2a839ac2cd3 100644
> --- a/tcg/mips/tcg-target.inc.c
> +++ b/tcg/mips/tcg-target.inc.c
> @@ -1397,19 +1397,19 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          }
>          break;
>      case INDEX_op_goto_tb:
> -        if (s->tb_jmp_offset) {
> +        if (s->tb_jmp_insn_offset) {
>              /* direct jump method */
> -            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
> +            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
>              /* Avoid clobbering the address during retranslation.  */
>              tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
>          } else {
>              /* indirect jump method */
>              tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
> -                       (uintptr_t)(s->tb_next + a0));
> +                       (uintptr_t)(s->tb_jmp_target_addr + a0));
>              tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
>          }
>          tcg_out_nop(s);
> -        s->tb_next_offset[a0] = tcg_current_code_size(s);
> +        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
>          break;
>      case INDEX_op_br:
>          tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
> diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
> index 8c1c2dfa9b22..10394079ad9d 100644
> --- a/tcg/ppc/tcg-target.inc.c
> +++ b/tcg/ppc/tcg-target.inc.c
> @@ -1894,17 +1894,17 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>          tcg_out_b(s, 0, tb_ret_addr);
>          break;
>      case INDEX_op_goto_tb:
> -        tcg_debug_assert(s->tb_jmp_offset);
> +        tcg_debug_assert(s->tb_jmp_insn_offset);
>          /* Direct jump.  Ensure the next insns are 8-byte aligned. */
>          if ((uintptr_t)s->code_ptr & 7) {
>              tcg_out32(s, NOP);
>          }
> -        s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
> +        s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
>          /* To be replaced by either a branch+nop or a load into TMP1.  */
>          s->code_ptr += 2;
>          tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
>          tcg_out32(s, BCCTR | BO_ALWAYS);
> -        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
> +        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
>          break;
>      case INDEX_op_br:
>          {
> diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
> index fbf97bb2e15d..e95b04b0e278 100644
> --- a/tcg/s390/tcg-target.inc.c
> +++ b/tcg/s390/tcg-target.inc.c
> @@ -1715,17 +1715,18 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          break;
>
>      case INDEX_op_goto_tb:
> -        if (s->tb_jmp_offset) {
> +        if (s->tb_jmp_insn_offset) {
>              tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
> -            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
> +            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
>              s->code_ptr += 2;
>          } else {
> -            /* load address stored at s->tb_next + args[0] */
> -            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
> +            /* load address stored at s->tb_jmp_target_addr + args[0] */
> +            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0,
> +                           s->tb_jmp_target_addr + args[0]);
>              /* and go there */
>              tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
>          }
> -        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
> +        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
>          break;
>
>      OP_32_64(ld8u):
> diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
> index 54df1bc42432..a611885a2aaf 100644
> --- a/tcg/sparc/tcg-target.inc.c
> +++ b/tcg/sparc/tcg-target.inc.c
> @@ -1229,18 +1229,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          }
>          break;
>      case INDEX_op_goto_tb:
> -        if (s->tb_jmp_offset) {
> +        if (s->tb_jmp_insn_offset) {
>              /* direct jump method */
> -            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
> +            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
>              /* Make sure to preserve links during retranslation.  */
>              tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
>          } else {
>              /* indirect jump method */
> -            tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0));
> +            tcg_out_ld_ptr(s, TCG_REG_T1,
> +                           (uintptr_t)(s->tb_jmp_target_addr + a0));
>              tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
>          }
>          tcg_out_nop(s);
> -        s->tb_next_offset[a0] = tcg_current_code_size(s);
> +        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
>          break;
>      case INDEX_op_br:
>          tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
> diff --git a/tcg/tcg.h b/tcg/tcg.h
> index b83f76351ccd..f9d11b29442b 100644
> --- a/tcg/tcg.h
> +++ b/tcg/tcg.h
> @@ -510,9 +510,9 @@ struct TCGContext {
>
>      /* goto_tb support */
>      tcg_insn_unit *code_buf;
> -    uintptr_t *tb_next;
> -    uint16_t *tb_next_offset;
> -    uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */
> +    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
> +    uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
> +    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */
>
>      /* liveness analysis */
>      uint16_t *op_dead_args; /* for each operation, each bit tells if the
> diff --git a/tcg/tci/tcg-target.inc.c b/tcg/tci/tcg-target.inc.c
> index 4afe4d7a8d59..4e91687abd1c 100644
> --- a/tcg/tci/tcg-target.inc.c
> +++ b/tcg/tci/tcg-target.inc.c
> @@ -553,17 +553,17 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>          tcg_out64(s, args[0]);
>          break;
>      case INDEX_op_goto_tb:
> -        if (s->tb_jmp_offset) {
> +        if (s->tb_jmp_insn_offset) {
>              /* Direct jump method. */
> -            assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
> -            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
> +            assert(args[0] < ARRAY_SIZE(s->tb_jmp_insn_offset));
> +            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
>              tcg_out32(s, 0);
>          } else {
>              /* Indirect jump method. */
>              TODO();
>          }
> -        assert(args[0] < ARRAY_SIZE(s->tb_next_offset));
> -        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
> +        assert(args[0] < ARRAY_SIZE(s->tb_jmp_reset_offset));
> +        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
>          break;
>      case INDEX_op_br:
>          tci_out_label(s, arg_label(args[0]));
> diff --git a/translate-all.c b/translate-all.c
> index e9f409b762ab..31cdf8ae217e 100644
> --- a/translate-all.c
> +++ b/translate-all.c
> @@ -929,7 +929,7 @@ static inline void tb_jmp_remove(TranslationBlock *tb, int n)
>      TranslationBlock *tb1, **ptb;
>      unsigned int n1;
>
> -    ptb = &tb->jmp_next[n];
> +    ptb = &tb->jmp_list_next[n];
>      tb1 = *ptb;
>      if (tb1) {
>          /* find tb(n) in circular list */
> @@ -941,15 +941,15 @@ static inline void tb_jmp_remove(TranslationBlock *tb, int n)
>                  break;
>              }
>              if (n1 == 2) {
> -                ptb = &tb1->jmp_first;
> +                ptb = &tb1->jmp_list_first;
>              } else {
> -                ptb = &tb1->jmp_next[n1];
> +                ptb = &tb1->jmp_list_next[n1];
>              }
>          }
>          /* now we can suppress tb(n) from the list */
> -        *ptb = tb->jmp_next[n];
> +        *ptb = tb->jmp_list_next[n];
>
> -        tb->jmp_next[n] = NULL;
> +        tb->jmp_list_next[n] = NULL;
>      }
>  }
>
> @@ -957,7 +957,8 @@ static inline void tb_jmp_remove(TranslationBlock *tb, int n)
>     another TB */
>  static inline void tb_reset_jump(TranslationBlock *tb, int n)
>  {
> -    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
> +    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
> +    tb_set_jmp_target(tb, n, addr);
>  }
>
>  /* invalidate one TB */
> @@ -1001,19 +1002,21 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
>      tb_jmp_remove(tb, 1);
>
>      /* suppress any remaining jumps to this TB */
> -    tb1 = tb->jmp_first;
> +    tb1 = tb->jmp_list_first;
>      for (;;) {
>          n1 = (uintptr_t)tb1 & 3;
>          if (n1 == 2) {
>              break;
>          }
>          tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
> -        tb2 = tb1->jmp_next[n1];
> +        tb2 = tb1->jmp_list_next[n1];
>          tb_reset_jump(tb1, n1);
> -        tb1->jmp_next[n1] = NULL;
> +        tb1->jmp_list_next[n1] = NULL;
>          tb1 = tb2;
>      }
> -    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
> +
> +    /* fail safe */
> +    tb->jmp_list_first = (TranslationBlock *)((uintptr_t)tb | 2);
>
>      tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
>  }
> @@ -1098,15 +1101,15 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
>      trace_translate_block(tb, tb->pc, tb->tc_ptr);
>
>      /* generate machine code */
> -    tb->tb_next_offset[0] = 0xffff;
> -    tb->tb_next_offset[1] = 0xffff;
> -    tcg_ctx.tb_next_offset = tb->tb_next_offset;
> +    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
> +    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
> +    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
>  #ifdef USE_DIRECT_JUMP
> -    tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
> -    tcg_ctx.tb_next = NULL;
> +    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
> +    tcg_ctx.tb_jmp_target_addr = NULL;
>  #else
> -    tcg_ctx.tb_jmp_offset = NULL;
> -    tcg_ctx.tb_next = tb->tb_next;
> +    tcg_ctx.tb_jmp_insn_offset = NULL;
> +    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
>  #endif
>
>  #ifdef CONFIG_PROFILER
> @@ -1486,15 +1489,15 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
>          tb->page_addr[1] = -1;
>      }
>
> -    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
> -    tb->jmp_next[0] = NULL;
> -    tb->jmp_next[1] = NULL;
> +    tb->jmp_list_first = (TranslationBlock *)((uintptr_t)tb | 2);
> +    tb->jmp_list_next[0] = NULL;
> +    tb->jmp_list_next[1] = NULL;
>
>      /* init original jump addresses */
> -    if (tb->tb_next_offset[0] != 0xffff) {
> +    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
>          tb_reset_jump(tb, 0);
>      }
> -    if (tb->tb_next_offset[1] != 0xffff) {
> +    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
>          tb_reset_jump(tb, 1);
>      }
>
> @@ -1687,9 +1690,9 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
>          if (tb->page_addr[1] != -1) {
>              cross_page++;
>          }
> -        if (tb->tb_next_offset[0] != 0xffff) {
> +        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
>              direct_jmp_count++;
> -            if (tb->tb_next_offset[1] != 0xffff) {
> +            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
>                  direct_jmp2_count++;
>              }
>          }


--
Alex Bennée
Sergey Fedorov March 24, 2016, 2:02 p.m. UTC | #2
On 24/03/16 16:42, Alex Bennée wrote:
>> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
>> > index 05a151da4a54..cc3d2ca25917 100644
>> > --- a/include/exec/exec-all.h
>> > +++ b/include/exec/exec-all.h
>> > @@ -257,20 +257,32 @@ struct TranslationBlock {
>> >      struct TranslationBlock *page_next[2];
>> >      tb_page_addr_t page_addr[2];
>> >
>> > -    /* the following data are used to directly call another TB from
>> > -       the code of this one. */
>> > -    uint16_t tb_next_offset[2]; /* offset of original jump target */
>> > +    /* The following data are used to directly call another TB from
>> > +     * the code of this one. This can be done either by emitting direct or
>> > +     * indirect native jump instructions. These jumps are reset so that the TB
>> > +     * just continue its execution. The TB can be linked to another one by
>> > +     * setting one of the jump targets (or patching the jump instruction). Only
>> > +     * two of such jumps are supported.
>> > +     */
>> > +    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
>> > +#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
>> >  #ifdef USE_DIRECT_JUMP
>> > -    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
>> > +    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
>> >  #else
>> > -    uintptr_t tb_next[2]; /* address of jump generated code */
>> > +    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
>> >  #endif
>> > -    /* list of TBs jumping to this one. This is a circular list using
>> > -       the two least significant bits of the pointers to tell what is
>> > -       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
>> > -       jmp_first */
>> > -    struct TranslationBlock *jmp_next[2];
>> > -    struct TranslationBlock *jmp_first;
>> > +    /* Each TB has an assosiated circular list of TBs jumping to this one.
>> > +     * jmp_list_first points to the first TB jumping to this one.
>> > +     * jmp_list_next is used to point to the next TB in a list.
>> > +     * Since each TB can have two jumps, it can participate in two lists.
>> > +     * The two least significant bits of a pointer are used to choose which
>> > +     * data field holds a pointer to the next TB:
>> > +     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
>> > +     * In other words, 0/1 tells which jump is used in the pointed TB,
>> > +     * and 2 means that this is a pointer back to the target TB of this list.
>> > +     */
>> > +    struct TranslationBlock *jmp_list_next[2];
>> > +    struct TranslationBlock *jmp_list_first;
> OK I found that tricky to follow. Where does the value of the pointer
> come from that sets these bottom bits? The TB jumping to this TB sets it?

Yeah, that's not easy to describe. Initially, we set:

    tb->jmp_list_first = tb | 2

That makes an empty list: jmp_list_first just points to this TB and
the low bits are 2.

After that we can add a TB to the list in tb_add_jump():

    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = tb | n;

where 'tb' is going to jump to 'tb_next', and 'n' (0 or 1) is the index
of the jump target of 'tb'.

(I simplified the code here)
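
The reading side just decodes the low bits to find where the next link
of the list is stored; roughly (a sketch, the real loops are in
tb_jmp_remove() and tb_phys_invalidate()):

    /* 'ptr' is a link value taken from the list */
    n1 = (uintptr_t)ptr & 3;
    tb1 = (TranslationBlock *)((uintptr_t)ptr & ~3);
    if (n1 == 2) {
        /* tb1 is the target TB itself: its next link is
         * tb1->jmp_list_first, which closes the circle */
    } else {
        /* tb1 jumps to the target via its jump n1: the next link is
         * tb1->jmp_list_next[n1] */
    }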

Any ideas how to make it more clear in the comment?

Kind regards,
Sergey
Alex Bennée March 24, 2016, 3:01 p.m. UTC | #3
Sergey Fedorov <serge.fdrv@gmail.com> writes:

> On 24/03/16 16:42, Alex Bennée wrote:
>>> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
>>> > index 05a151da4a54..cc3d2ca25917 100644
>>> > --- a/include/exec/exec-all.h
>>> > +++ b/include/exec/exec-all.h
>>> > @@ -257,20 +257,32 @@ struct TranslationBlock {
>>> >      struct TranslationBlock *page_next[2];
>>> >      tb_page_addr_t page_addr[2];
>>> >
>>> > -    /* the following data are used to directly call another TB from
>>> > -       the code of this one. */
>>> > -    uint16_t tb_next_offset[2]; /* offset of original jump target */
>>> > +    /* The following data are used to directly call another TB from
>>> > +     * the code of this one. This can be done either by emitting direct or
>>> > +     * indirect native jump instructions. These jumps are reset so that the TB
>>> > +     * just continue its execution. The TB can be linked to another one by
>>> > +     * setting one of the jump targets (or patching the jump instruction). Only
>>> > +     * two of such jumps are supported.
>>> > +     */
>>> > +    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
>>> > +#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
>>> >  #ifdef USE_DIRECT_JUMP
>>> > -    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
>>> > +    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
>>> >  #else
>>> > -    uintptr_t tb_next[2]; /* address of jump generated code */
>>> > +    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
>>> >  #endif
>>> > -    /* list of TBs jumping to this one. This is a circular list using
>>> > -       the two least significant bits of the pointers to tell what is
>>> > -       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
>>> > -       jmp_first */
>>> > -    struct TranslationBlock *jmp_next[2];
>>> > -    struct TranslationBlock *jmp_first;
>>> > +    /* Each TB has an assosiated circular list of TBs jumping to this one.
>>> > +     * jmp_list_first points to the first TB jumping to this one.
>>> > +     * jmp_list_next is used to point to the next TB in a list.
>>> > +     * Since each TB can have two jumps, it can participate in two lists.
>>> > +     * The two least significant bits of a pointer are used to choose which
>>> > +     * data field holds a pointer to the next TB:
>>> > +     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
>>> > +     * In other words, 0/1 tells which jump is used in the pointed TB,
>>> > +     * and 2 means that this is a pointer back to the target TB of this list.
>>> > +     */
>>> > +    struct TranslationBlock *jmp_list_next[2];
>>> > +    struct TranslationBlock *jmp_list_first;
>> OK I found that tricky to follow. Where does the value of the pointer
>> come from that sets these bottom bits? The TB jumping to this TB sets it?
>
> Yeah, that's not easy to describe. Initially, we set:
>
>     tb->jmp_list_first = tb | 2
>
> That makes an empty list: jmp_list_first just points to the this TB and
> the low bits are 2.
>
> After that we can add a TB to the list in tb_add_jump():
>
>     tb->jmp_list_next[n] = tb_next->jmp_list_first;
>     tb_next->jmp_list_first = tb | n;
>
> where 'tb' is going to jump to 'tb_next', 'n' (can be 0 or 1) is an
> index of jump target of 'tb'.

Where I get confused is: what is the point of jmp_list_first? If these
are two circular lists, do we care which one is first in the list? The
exit condition when coming out of searching seems to be when ntb with
index = orig tb with index.

>
> (I simplified the code here)
>
> Any ideas how to make it more clear in the comment?
>
> Kind regards,
> Sergey


--
Alex Bennée
Sergey Fedorov March 24, 2016, 3:10 p.m. UTC | #4
On 24/03/16 18:01, Alex Bennée wrote:
> Sergey Fedorov <serge.fdrv@gmail.com> writes:
>
>> On 24/03/16 16:42, Alex Bennée wrote:
>>>> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
>>>>> index 05a151da4a54..cc3d2ca25917 100644
>>>>> --- a/include/exec/exec-all.h
>>>>> +++ b/include/exec/exec-all.h
>>>>> @@ -257,20 +257,32 @@ struct TranslationBlock {
>>>>>      struct TranslationBlock *page_next[2];
>>>>>      tb_page_addr_t page_addr[2];
>>>>>
>>>>> -    /* the following data are used to directly call another TB from
>>>>> -       the code of this one. */
>>>>> -    uint16_t tb_next_offset[2]; /* offset of original jump target */
>>>>> +    /* The following data are used to directly call another TB from
>>>>> +     * the code of this one. This can be done either by emitting direct or
>>>>> +     * indirect native jump instructions. These jumps are reset so that the TB
>>>>> +     * just continue its execution. The TB can be linked to another one by
>>>>> +     * setting one of the jump targets (or patching the jump instruction). Only
>>>>> +     * two of such jumps are supported.
>>>>> +     */
>>>>> +    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
>>>>> +#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
>>>>>  #ifdef USE_DIRECT_JUMP
>>>>> -    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
>>>>> +    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
>>>>>  #else
>>>>> -    uintptr_t tb_next[2]; /* address of jump generated code */
>>>>> +    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
>>>>>  #endif
>>>>> -    /* list of TBs jumping to this one. This is a circular list using
>>>>> -       the two least significant bits of the pointers to tell what is
>>>>> -       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
>>>>> -       jmp_first */
>>>>> -    struct TranslationBlock *jmp_next[2];
>>>>> -    struct TranslationBlock *jmp_first;
>>>>> +    /* Each TB has an assosiated circular list of TBs jumping to this one.
>>>>> +     * jmp_list_first points to the first TB jumping to this one.
>>>>> +     * jmp_list_next is used to point to the next TB in a list.
>>>>> +     * Since each TB can have two jumps, it can participate in two lists.
>>>>> +     * The two least significant bits of a pointer are used to choose which
>>>>> +     * data field holds a pointer to the next TB:
>>>>> +     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
>>>>> +     * In other words, 0/1 tells which jump is used in the pointed TB,
>>>>> +     * and 2 means that this is a pointer back to the target TB of this list.
>>>>> +     */
>>>>> +    struct TranslationBlock *jmp_list_next[2];
>>>>> +    struct TranslationBlock *jmp_list_first;
>>> OK I found that tricky to follow. Where does the value of the pointer
>>> come from that sets these bottom bits? The TB jumping to this TB sets it?
>> Yeah, that's not easy to describe. Initially, we set:
>>
>>     tb->jmp_list_first = tb | 2
>>
>> That makes an empty list: jmp_list_first just points to the this TB and
>> the low bits are 2.
>>
>> After that we can add a TB to the list in tb_add_jump():
>>
>>     tb->jmp_list_next[n] = tb_next->jmp_list_first;
>>     tb_next->jmp_list_first = tb | n;
>>
>> where 'tb' is going to jump to 'tb_next', 'n' (can be 0 or 1) is an
>> index of jump target of 'tb'.
> Where I get confused it what is the point of jmp_list_first? If these
> are two circular lists do we care which the first in the list is? The
> exit condition when coming out of searching seems when ntb with index =
> orig tb with index.

So 'tb->jmp_list_first' points to the first TB jumping to 'tb'. Then we
use 'jmp_list_next[n]' of that TB to traverse the list further.
Eventually, we get 'jmp_list_next[n] & 3 == 2' which means
jmp_list_next[n] points back to the target TB. Hope it helps :)
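
Concretely, that walk is the loop in tb_phys_invalidate() (with the
names from this patch):

    tb1 = tb->jmp_list_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;                  /* back at 'tb' itself: done */
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_list_next[n1];
        tb_reset_jump(tb1, n1);     /* un-chain tb1's n1-th jump */
        tb1->jmp_list_next[n1] = NULL;
        tb1 = tb2;
    }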

Kind regards,
Sergey
Paolo Bonzini March 24, 2016, 3:11 p.m. UTC | #5
On 24/03/2016 16:01, Alex Bennée wrote:
>>> >> OK I found that tricky to follow. Where does the value of the pointer
>>> >> come from that sets these bottom bits? The TB jumping to this TB sets it?
>
> Where I get confused it what is the point of jmp_list_first? If these
> are two circular lists do we care which the first in the list is? The
> exit condition when coming out of searching seems when ntb with index =
> orig tb with index.

Say you have a list for blocks that jump to TB. The next pointer is in
jmp_list_next[0] for blocks whose first jump is to TB. It is in
jmp_list_next[1] for blocks whose second jump is to TB.

However, because it is a circular list, you also need TB itself to be a
part of the list. For TB, the next pointer is in jmp_list_first.

Because TB probably doesn't jump to itself, the first link of the list
of blocks that jumps to TB is not in jmp_list_next[].  Thus QEMU places
it in tb->jmp_list_first.

Say you have three tbs.  TB1's first jump and TB2's second jump lead to
TB0.  Then the list starting at tb0->jmp_list_first goes like this:

    tb0->jmp_list_first = tb1 | 0;
      .--------------------'    |
     |                 .--------'
    tb1->jmp_list_next[0] = tb2 | 1;
      .--------------------'      |
      |                 .---------'
    tb2->jmp_list_next[1] = tb0 | 2;

There is also a case where a TB jumps to itself; it then appears twice
in the list with different values in the low bits, such as this:

    tb->jmp_list_first = tb | 0;
     .--------------------'   |
     |                .-------'
    tb->jmp_list_next[0] = tb | 2;

Other blocks jumping to TB would appear in the same list, too, either
before or after the tb|0 link.
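
For reference, that exact layout is what two tb_add_jump() calls
produce on top of the empty list set up by tb_link_page() (a sketch,
using the same tb0/tb1/tb2):

    /* tb_link_page(): empty list, tb0 points back at itself, tag 2 */
    tb0->jmp_list_first = (TranslationBlock *)((uintptr_t)tb0 | 2);

    tb_add_jump(tb2, 1, tb0);    /* tb2's second jump -> tb0 */
    tb_add_jump(tb1, 0, tb0);    /* tb1's first jump  -> tb0 */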

Paolo
Alex Bennée March 24, 2016, 3:23 p.m. UTC | #6
Paolo Bonzini <pbonzini@redhat.com> writes:

> On 24/03/2016 16:01, Alex Bennée wrote:
>>>> >> OK I found that tricky to follow. Where does the value of the pointer
>>>> >> come from that sets these bottom bits? The TB jumping to this TB sets it?
>>
>> Where I get confused it what is the point of jmp_list_first? If these
>> are two circular lists do we care which the first in the list is? The
>> exit condition when coming out of searching seems when ntb with index =
>> orig tb with index.
>
> Say you have a list for blocks that jump to TB. The next pointer is in
> jmp_list_next[0] for blocks whose first jump is to TB. It is in
> jmp_list_next[1] for blocks whose second jump is to TB.
>
> However, because it is a circular list, you also need TB itself to be a
> part of the list. For TB, the next pointer is in jmp_list_first.
>
> Because TB probably doesn't jump to itself, the first link of the list
> of blocks that jumps to TB is not in jmp_list_next[].  Thus QEMU places
> it in tb->jmp_list_first.
>
> Say you have three tbs.  TB1's first jump and TB2's second jump lead to
> TB0.  Then the list starting at tb0->jmp_list_first goes like this:
>
>     tb0->jmp_list_first = tb1 | 0;
>       .--------------------'    |
>      |                 .--------'
>     tb1->jmp_list_next[0] = tb2 | 1;
>       .--------------------'      |
>       |                 .---------'
>     tb2->jmp_list_next[1] = tb0 | 2;
>
> There is also a case where a TB jumps to itself; it then appears twice
> in the list with different values in the low bits, such as this:
>
>     tb->jmp_list_first = tb | 0;
>      .--------------------'   |
>      |                .-------'
>     tb->jmp_list_next[0] = tb | 2;
>
> Other blocks jumping to TB would appear in the same list, too, either
> before or after the tb|0 link.

Right, I follow now. Extra ASCII art always helps ;-)

--
Alex Bennée
Richard Henderson March 28, 2016, 10:12 p.m. UTC | #7
On 03/24/2016 08:11 AM, Paolo Bonzini wrote:
> There is also a case where a TB jumps to itself; it then appears twice
> in the list with different values in the low bits, such as this:
>
>      tb->jmp_list_first = tb | 0;
>       .--------------------'   |
>       |                .-------'
>      tb->jmp_list_next[0] = tb | 2;

Of course, it begs the question of why TB would be in its own list, even if it 
does jump to itself.  We only need the points-to list in order to invalidate a 
TB and unlink it.  But if TB is being invalidated, we don't need to reset the 
jump within TB itself.


r~
Paolo Bonzini March 29, 2016, 8:14 a.m. UTC | #8
On 29/03/2016 00:12, Richard Henderson wrote:
>> There is also a case where a TB jumps to itself; it then appears twice
>> in the list with different values in the low bits, such as this:
>>
>>      tb->jmp_list_first = tb | 0;
>>       .--------------------'   |
>>       |                .-------'
>>      tb->jmp_list_next[0] = tb | 2;
> 
> Of course, it begs the question of why TB would be in its own list, even
> if it does jump to itself.  We only need the points-to list in order to
> invalidate a TB and unlink it.  But if TB is being invalidated, we don't
> need to reset the jump within TB itself.

Isn't it just because TB acts as the list head in the circular list?  Or
am I misunderstanding you?

Paolo
Sergey Fedorov March 29, 2016, 8:31 a.m. UTC | #9
On 29/03/16 01:12, Richard Henderson wrote:
> On 03/24/2016 08:11 AM, Paolo Bonzini wrote:
>> There is also a case where a TB jumps to itself; it then appears twice
>> in the list with different values in the low bits, such as this:
>>
>>      tb->jmp_list_first = tb | 0;
>>       .--------------------'   |
>>       |                .-------'
>>      tb->jmp_list_next[0] = tb | 2;
>
> Of course, it begs the question of why TB would be in its own list,
> even if it does jump to itself.  We only need the points-to list in
> order to invalidate a TB and unlink it.  But if TB is being
> invalidated, we don't need to reset the jump within TB itself.

If we're going to move tb_phys_invalidate() outside of tb_lock, we
probably need to reset all jumps to the TB, even if it jumps to itself,
so that it eventually finishes its execution.

Kind regards,
Sergey
Paolo Bonzini March 29, 2016, 8:51 a.m. UTC | #10
On 29/03/2016 10:14, Paolo Bonzini wrote:
> 
> 
> On 29/03/2016 00:12, Richard Henderson wrote:
>>> There is also a case where a TB jumps to itself; it then appears twice
>>> in the list with different values in the low bits, such as this:
>>>
>>>      tb->jmp_list_first = tb | 0;
>>>       .--------------------'   |
>>>       |                .-------'
>>>      tb->jmp_list_next[0] = tb | 2;
>>
>> Of course, it begs the question of why TB would be in its own list, even
>> if it does jump to itself.  We only need the points-to list in order to
>> invalidate a TB and unlink it.  But if TB is being invalidated, we don't
>> need to reset the jump within TB itself.
> 
> Isn't it just because TB acts as the list head in the circular list?  Or
> am I misunderstanding you?

Ok, that was dumb. :) Understood it now.

Paolo
Richard Henderson March 29, 2016, 3:37 p.m. UTC | #11
On 03/29/2016 01:31 AM, Sergey Fedorov wrote:
> On 29/03/16 01:12, Richard Henderson wrote:
>> On 03/24/2016 08:11 AM, Paolo Bonzini wrote:
>>> There is also a case where a TB jumps to itself; it then appears twice
>>> in the list with different values in the low bits, such as this:
>>>
>>>      tb->jmp_list_first = tb | 0;
>>>       .--------------------'   |
>>>       |                .-------'
>>>      tb->jmp_list_next[0] = tb | 2;
>>
>> Of course, it begs the question of why TB would be in its own list,
>> even if it does jump to itself.  We only need the points-to list in
>> order to invalidate a TB and unlink it.  But if TB is being
>> invalidated, we don't need to reset the jump within TB itself.
> 
> If we're going to move tb_phys_invalidate() outside of tb_lock, we
> probably need to reset all jumps to the TB, even if it jumps to itself,
> so that it eventually finish its execution.

Ah, interesting point.  Very true.


r~
Peter Maydell March 29, 2016, 4:26 p.m. UTC | #12
On 29 March 2016 at 09:31, Sergey Fedorov <serge.fdrv@gmail.com> wrote:
> On 29/03/16 01:12, Richard Henderson wrote:
>> On 03/24/2016 08:11 AM, Paolo Bonzini wrote:
>>> There is also a case where a TB jumps to itself; it then appears twice
>>> in the list with different values in the low bits, such as this:
>>>
>>>      tb->jmp_list_first = tb | 0;
>>>       .--------------------'   |
>>>       |                .-------'
>>>      tb->jmp_list_next[0] = tb | 2;
>>
>> Of course, it begs the question of why TB would be in its own list,
>> even if it does jump to itself.  We only need the points-to list in
>> order to invalidate a TB and unlink it.  But if TB is being
>> invalidated, we don't need to reset the jump within TB itself.
>
> If we're going to move tb_phys_invalidate() outside of tb_lock, we
> probably need to reset all jumps to the TB, even if it jumps to itself,
> so that it eventually finish its execution.

This is likely also the historical reason for the current code --
originally we handled requesting a CPU exit by unlinking the TB,
so you needed to be able to detach jumps-to-self (these days we do
it by checking a flag at the start of each TB).

thanks
-- PMM
Sergey Fedorov March 29, 2016, 5:58 p.m. UTC | #13
On 29/03/16 19:26, Peter Maydell wrote:
> On 29 March 2016 at 09:31, Sergey Fedorov <serge.fdrv@gmail.com> wrote:
>> On 29/03/16 01:12, Richard Henderson wrote:
>>> On 03/24/2016 08:11 AM, Paolo Bonzini wrote:
>>>> There is also a case where a TB jumps to itself; it then appears twice
>>>> in the list with different values in the low bits, such as this:
>>>>
>>>>      tb->jmp_list_first = tb | 0;
>>>>       .--------------------'   |
>>>>       |                .-------'
>>>>      tb->jmp_list_next[0] = tb | 2;
>>> Of course, it begs the question of why TB would be in its own list,
>>> even if it does jump to itself.  We only need the points-to list in
>>> order to invalidate a TB and unlink it.  But if TB is being
>>> invalidated, we don't need to reset the jump within TB itself.
>> If we're going to move tb_phys_invalidate() outside of tb_lock, we
>> probably need to reset all jumps to the TB, even if it jumps to itself,
>> so that it eventually finish its execution.
> This is likely also the historical reason for the current code --
> originally we handled requesting a CPU exit by unlinking the TB,
> so you needed to be able to detach jumps-to-self (these days we do
> it by checking a flag at the start of each TB).

I'm not sure a CPU exit request is raised each time a TB gets invalidated...

Kind regards,
Sergey

Patch

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 05a151da4a54..cc3d2ca25917 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -257,20 +257,32 @@  struct TranslationBlock {
     struct TranslationBlock *page_next[2];
     tb_page_addr_t page_addr[2];
 
-    /* the following data are used to directly call another TB from
-       the code of this one. */
-    uint16_t tb_next_offset[2]; /* offset of original jump target */
+    /* The following data are used to directly call another TB from
+     * the code of this one. This can be done either by emitting direct or
+     * indirect native jump instructions. These jumps are reset so that the TB
+     * just continue its execution. The TB can be linked to another one by
+     * setting one of the jump targets (or patching the jump instruction). Only
+     * two of such jumps are supported.
+     */
+    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
+#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
 #ifdef USE_DIRECT_JUMP
-    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
+    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
 #else
-    uintptr_t tb_next[2]; /* address of jump generated code */
+    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
 #endif
-    /* list of TBs jumping to this one. This is a circular list using
-       the two least significant bits of the pointers to tell what is
-       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
-       jmp_first */
-    struct TranslationBlock *jmp_next[2];
-    struct TranslationBlock *jmp_first;
+    /* Each TB has an assosiated circular list of TBs jumping to this one.
+     * jmp_list_first points to the first TB jumping to this one.
+     * jmp_list_next is used to point to the next TB in a list.
+     * Since each TB can have two jumps, it can participate in two lists.
+     * The two least significant bits of a pointer are used to choose which
+     * data field holds a pointer to the next TB:
+     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
+     * In other words, 0/1 tells which jump is used in the pointed TB,
+     * and 2 means that this is a pointer back to the target TB of this list.
+     */
+    struct TranslationBlock *jmp_list_next[2];
+    struct TranslationBlock *jmp_list_first;
 };
 
 #include "qemu/thread.h"
@@ -359,7 +371,7 @@  void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
 static inline void tb_set_jmp_target(TranslationBlock *tb,
                                      int n, uintptr_t addr)
 {
-    uint16_t offset = tb->tb_jmp_offset[n];
+    uint16_t offset = tb->jmp_insn_offset[n];
     tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
 }
 
@@ -369,7 +381,7 @@  static inline void tb_set_jmp_target(TranslationBlock *tb,
 static inline void tb_set_jmp_target(TranslationBlock *tb,
                                      int n, uintptr_t addr)
 {
-    tb->tb_next[n] = addr;
+    tb->jmp_target_addr[n] = addr;
 }
 
 #endif
@@ -378,13 +390,13 @@  static inline void tb_add_jump(TranslationBlock *tb, int n,
                                TranslationBlock *tb_next)
 {
     /* NOTE: this test is only needed for thread safety */
-    if (!tb->jmp_next[n]) {
+    if (!tb->jmp_list_next[n]) {
         /* patch the native jump address */
         tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
 
         /* add in TB jmp circular list */
-        tb->jmp_next[n] = tb_next->jmp_first;
-        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
+        tb->jmp_list_next[n] = tb_next->jmp_list_first;
+        tb_next->jmp_list_first = (TranslationBlock *)((uintptr_t)tb | n);
     }
 }
 
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 0ed10a974121..08efdf41da48 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -1294,12 +1294,13 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 #ifndef USE_DIRECT_JUMP
 #error "USE_DIRECT_JUMP required for aarch64"
 #endif
-        assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */
-        s->tb_jmp_offset[a0] = tcg_current_code_size(s);
+        /* consistency for USE_DIRECT_JUMP */
+        assert(s->tb_jmp_insn_offset != NULL);
+        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
         /* actual branch destination will be patched by
            aarch64_tb_set_jmp_target later, beware retranslation. */
         tcg_out_goto_noaddr(s);
-        s->tb_next_offset[a0] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
         break;
 
     case INDEX_op_br:
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index 3edf6a6f971c..a9147620b073 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -1647,17 +1647,17 @@  static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_goto(s, COND_AL, tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* Direct jump method */
-            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
             tcg_out_b_noaddr(s, COND_AL);
         } else {
             /* Indirect jump method */
-            intptr_t ptr = (intptr_t)(s->tb_next + args[0]);
+            intptr_t ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
             tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff);
             tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff);
         }
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index 9187d34caf6d..2f98cae97f3b 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1775,17 +1775,17 @@  static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_jmp(s, tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* direct jump method */
             tcg_out8(s, OPC_JMP_long); /* jmp im */
-            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
             tcg_out32(s, 0);
         } else {
             /* indirect jump method */
             tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
-                                 (intptr_t)(s->tb_next + args[0]));
+                                 (intptr_t)(s->tb_jmp_target_addr + args[0]));
         }
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_jxx(s, JCC_JMP, arg_label(args[0]), 0);
diff --git a/tcg/ia64/tcg-target.inc.c b/tcg/ia64/tcg-target.inc.c
index 62d654943c20..261861f90c3a 100644
--- a/tcg/ia64/tcg-target.inc.c
+++ b/tcg/ia64/tcg-target.inc.c
@@ -881,13 +881,13 @@  static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
 
 static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
 {
-    if (s->tb_jmp_offset) {
+    if (s->tb_jmp_insn_offset) {
         /* direct jump method */
         tcg_abort();
     } else {
         /* indirect jump method */
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2,
-                     (tcg_target_long)(s->tb_next + arg));
+                     (tcg_target_long)(s->tb_jmp_target_addr + arg));
         tcg_out_bundle(s, MmI,
                        tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
                                    TCG_REG_R2, TCG_REG_R2),
@@ -900,7 +900,7 @@  static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
                        tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
                                    TCG_REG_B6));
     }
-    s->tb_next_offset[arg] = tcg_current_code_size(s);
+    s->tb_jmp_reset_offset[arg] = tcg_current_code_size(s);
 }
 
 static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index 297bd00910b7..b2a839ac2cd3 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -1397,19 +1397,19 @@  static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* direct jump method */
-            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
             /* Avoid clobbering the address during retranslation.  */
             tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
         } else {
             /* indirect jump method */
             tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
-                       (uintptr_t)(s->tb_next + a0));
+                       (uintptr_t)(s->tb_jmp_target_addr + a0));
             tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
         }
         tcg_out_nop(s);
-        s->tb_next_offset[a0] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 8c1c2dfa9b22..10394079ad9d 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -1894,17 +1894,17 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         tcg_out_b(s, 0, tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
-        tcg_debug_assert(s->tb_jmp_offset);
+        tcg_debug_assert(s->tb_jmp_insn_offset);
         /* Direct jump.  Ensure the next insns are 8-byte aligned. */
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out32(s, NOP);
         }
-        s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
         /* To be replaced by either a branch+nop or a load into TMP1.  */
         s->code_ptr += 2;
         tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
         tcg_out32(s, BCCTR | BO_ALWAYS);
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         {
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index fbf97bb2e15d..e95b04b0e278 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -1715,17 +1715,18 @@  static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
-            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
             s->code_ptr += 2;
         } else {
-            /* load address stored at s->tb_next + args[0] */
-            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
+            /* load address stored at s->tb_jmp_target_addr + args[0] */
+            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0,
+                           s->tb_jmp_target_addr + args[0]);
             /* and go there */
             tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
         }
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
 
     OP_32_64(ld8u):
diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index 54df1bc42432..a611885a2aaf 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -1229,18 +1229,19 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* direct jump method */
-            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
             /* Make sure to preserve links during retranslation.  */
             tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
         } else {
             /* indirect jump method */
-            tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0));
+            tcg_out_ld_ptr(s, TCG_REG_T1,
+                           (uintptr_t)(s->tb_jmp_target_addr + a0));
             tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
         }
         tcg_out_nop(s);
-        s->tb_next_offset[a0] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
diff --git a/tcg/tcg.h b/tcg/tcg.h
index b83f76351ccd..f9d11b29442b 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -510,9 +510,9 @@  struct TCGContext {
 
     /* goto_tb support */
     tcg_insn_unit *code_buf;
-    uintptr_t *tb_next;
-    uint16_t *tb_next_offset;
-    uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */
+    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
+    uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
+    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */
 
     /* liveness analysis */
     uint16_t *op_dead_args; /* for each operation, each bit tells if the
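
A note on the three fields above: tb_jmp_insn_offset is non-NULL only when the
backend patches the jump instruction in place (USE_DIRECT_JUMP); otherwise
tb_jmp_target_addr points at the per-TB slots holding the jump target
addresses, and tb_jmp_reset_offset always records where the code following
each jump begins. A minimal sketch (not itself part of this patch) of the
per-backend INDEX_op_goto_tb pattern that results, with emit_jump_insn() and
emit_indirect_jump() as hypothetical stand-ins for the real per-target
emitters:

    /* Sketch only: how a backend consumes the renamed TCGContext fields
     * when emitting INDEX_op_goto_tb. */
    static void sketch_goto_tb(TCGContext *s, TCGArg which)
    {
        if (s->tb_jmp_insn_offset) {
            /* direct jump: record where the patchable instruction starts */
            s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
            emit_jump_insn(s);                              /* hypothetical */
        } else {
            /* indirect jump: branch via the address slot for this edge */
            emit_indirect_jump(s, (uintptr_t)(s->tb_jmp_target_addr + which));
        }
        /* in both cases, note where execution resumes once unchained */
        s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
    }
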
diff --git a/tcg/tci/tcg-target.inc.c b/tcg/tci/tcg-target.inc.c
index 4afe4d7a8d59..4e91687abd1c 100644
--- a/tcg/tci/tcg-target.inc.c
+++ b/tcg/tci/tcg-target.inc.c
@@ -553,17 +553,17 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         tcg_out64(s, args[0]);
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* Direct jump method. */
-            assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
-            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+            assert(args[0] < ARRAY_SIZE(s->tb_jmp_insn_offset));
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
             tcg_out32(s, 0);
         } else {
             /* Indirect jump method. */
             TODO();
         }
-        assert(args[0] < ARRAY_SIZE(s->tb_next_offset));
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        assert(args[0] < ARRAY_SIZE(s->tb_jmp_reset_offset));
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tci_out_label(s, arg_label(args[0]));
diff --git a/translate-all.c b/translate-all.c
index e9f409b762ab..31cdf8ae217e 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -929,7 +929,7 @@  static inline void tb_jmp_remove(TranslationBlock *tb, int n)
     TranslationBlock *tb1, **ptb;
     unsigned int n1;
 
-    ptb = &tb->jmp_next[n];
+    ptb = &tb->jmp_list_next[n];
     tb1 = *ptb;
     if (tb1) {
         /* find tb(n) in circular list */
@@ -941,15 +941,15 @@  static inline void tb_jmp_remove(TranslationBlock *tb, int n)
                 break;
             }
             if (n1 == 2) {
-                ptb = &tb1->jmp_first;
+                ptb = &tb1->jmp_list_first;
             } else {
-                ptb = &tb1->jmp_next[n1];
+                ptb = &tb1->jmp_list_next[n1];
             }
         }
         /* now we can suppress tb(n) from the list */
-        *ptb = tb->jmp_next[n];
+        *ptb = tb->jmp_list_next[n];
 
-        tb->jmp_next[n] = NULL;
+        tb->jmp_list_next[n] = NULL;
     }
 }
 
@@ -957,7 +957,8 @@  static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    another TB */
 static inline void tb_reset_jump(TranslationBlock *tb, int n)
 {
-    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
+    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
+    tb_set_jmp_target(tb, n, addr);
 }
 
 /* invalidate one TB */
@@ -1001,19 +1002,21 @@  void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
     tb_jmp_remove(tb, 1);
 
     /* suppress any remaining jumps to this TB */
-    tb1 = tb->jmp_first;
+    tb1 = tb->jmp_list_first;
     for (;;) {
         n1 = (uintptr_t)tb1 & 3;
         if (n1 == 2) {
             break;
         }
         tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
-        tb2 = tb1->jmp_next[n1];
+        tb2 = tb1->jmp_list_next[n1];
         tb_reset_jump(tb1, n1);
-        tb1->jmp_next[n1] = NULL;
+        tb1->jmp_list_next[n1] = NULL;
         tb1 = tb2;
     }
-    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
+
+    /* fail safe */
+    tb->jmp_list_first = (TranslationBlock *)((uintptr_t)tb | 2);
 
     tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
 }
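
The loop above walks the circular list of TBs that still jump into the one
being invalidated. Entries in jmp_list_next[]/jmp_list_first carry the jump
slot of the pointing TB in their low two bits, with the value 2 marking the
list head. A minimal sketch of that decoding, assuming (as the code above
does) that TranslationBlock pointers are at least 4-byte aligned;
jmp_list_decode() is illustrative only:

    /* Sketch only: split a jmp_list_* entry into TB pointer and tag. */
    static TranslationBlock *jmp_list_decode(uintptr_t entry, int *tag)
    {
        *tag = entry & 3;   /* 0 or 1: which jump of the pointing TB;
                               2: end-of-list marker */
        return (TranslationBlock *)(entry & ~(uintptr_t)3);
    }
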
@@ -1098,15 +1101,15 @@  TranslationBlock *tb_gen_code(CPUState *cpu,
     trace_translate_block(tb, tb->pc, tb->tc_ptr);
 
     /* generate machine code */
-    tb->tb_next_offset[0] = 0xffff;
-    tb->tb_next_offset[1] = 0xffff;
-    tcg_ctx.tb_next_offset = tb->tb_next_offset;
+    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
+    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
+    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
 #ifdef USE_DIRECT_JUMP
-    tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
-    tcg_ctx.tb_next = NULL;
+    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
+    tcg_ctx.tb_jmp_target_addr = NULL;
 #else
-    tcg_ctx.tb_jmp_offset = NULL;
-    tcg_ctx.tb_next = tb->tb_next;
+    tcg_ctx.tb_jmp_insn_offset = NULL;
+    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
 #endif
 
 #ifdef CONFIG_PROFILER
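
TB_JMP_RESET_OFFSET_INVALID, presumably defined in the exec-all.h hunk of this
patch, replaces the bare 0xffff sentinel used before: a slot whose reset
offset still holds the sentinel never had a goto_tb emitted for it, which is
exactly what the checks below in tb_link_page() and dump_exec_info() test. A
minimal illustrative helper (tb_has_direct_jump() is not in the patch):

    /* Sketch only: a valid reset offset means a direct jump was
     * generated for slot n of this TB. */
    static bool tb_has_direct_jump(const TranslationBlock *tb, int n)
    {
        return tb->jmp_reset_offset[n] != TB_JMP_RESET_OFFSET_INVALID;
    }
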
@@ -1486,15 +1489,15 @@  static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
         tb->page_addr[1] = -1;
     }
 
-    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
-    tb->jmp_next[0] = NULL;
-    tb->jmp_next[1] = NULL;
+    tb->jmp_list_first = (TranslationBlock *)((uintptr_t)tb | 2);
+    tb->jmp_list_next[0] = NULL;
+    tb->jmp_list_next[1] = NULL;
 
     /* init original jump addresses */
-    if (tb->tb_next_offset[0] != 0xffff) {
+    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
         tb_reset_jump(tb, 0);
     }
-    if (tb->tb_next_offset[1] != 0xffff) {
+    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
         tb_reset_jump(tb, 1);
     }
 
@@ -1687,9 +1690,9 @@  void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
         if (tb->page_addr[1] != -1) {
             cross_page++;
         }
-        if (tb->tb_next_offset[0] != 0xffff) {
+        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
             direct_jmp_count++;
-            if (tb->tb_next_offset[1] != 0xffff) {
+            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                 direct_jmp2_count++;
             }
         }