| Field | Value |
|---|---|
| Message ID | 1428437400-8474-4-git-send-email-peter.maydell@linaro.org |
| State | New |
| Headers | show |
On 07/04/2015 22:09, Peter Maydell wrote: > Make the CPU iotlb a structure rather than a plain hwaddr; > this will allow us to add transaction attributes to it. > > Signed-off-by: Peter Maydell <peter.maydell@linaro.org> > --- > cputlb.c | 4 ++-- > include/exec/cpu-defs.h | 13 +++++++++++-- > softmmu_template.h | 32 +++++++++++++++++--------------- > 3 files changed, 30 insertions(+), 19 deletions(-) > > diff --git a/cputlb.c b/cputlb.c > index 38f2151..5e1cb8f 100644 > --- a/cputlb.c > +++ b/cputlb.c > @@ -301,7 +301,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, > env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; > > /* refill the tlb */ > - env->iotlb[mmu_idx][index] = iotlb - vaddr; > + env->iotlb[mmu_idx][index].addr = iotlb - vaddr; > te->addend = addend - vaddr; > if (prot & PAGE_READ) { > te->addr_read = address; > @@ -349,7 +349,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) > (addr & TARGET_PAGE_MASK))) { > cpu_ldub_code(env1, addr); > } > - pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK; > + pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK; > mr = iotlb_to_region(cpu, pd); > if (memory_region_is_unassigned(mr)) { > CPUClass *cc = CPU_GET_CLASS(cpu); > diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h > index 0ca6f0b..7f88185 100644 > --- a/include/exec/cpu-defs.h > +++ b/include/exec/cpu-defs.h > @@ -102,12 +102,21 @@ typedef struct CPUTLBEntry { > > QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); > > +/* The IOTLB is not accessed directly inline by generated TCG code, > + * so the CPUIOTLBEntry layout is not as critical as that of the > + * CPUTLBEntry. (This is also why we don't want to combine the two > + * structs into one.) > + */ > +typedef struct CPUIOTLBEntry { > + hwaddr addr; > +} CPUIOTLBEntry; > + > #define CPU_COMMON_TLB \ > /* The meaning of the MMU modes is defined in the target code. 
*/ \ > CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ > CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \ > - hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ > - hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ > + CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ > + CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ > target_ulong tlb_flush_addr; \ > target_ulong tlb_flush_mask; \ > target_ulong vtlb_index; \ > diff --git a/softmmu_template.h b/softmmu_template.h > index 4b9bae7..7a36550 100644 > --- a/softmmu_template.h > +++ b/softmmu_template.h > @@ -123,7 +123,7 @@ > * victim tlb. try to refill from the victim tlb before walking the \ > * page table. */ \ > int vidx; \ > - hwaddr tmpiotlb; \ > + CPUIOTLBEntry tmpiotlb; \ > CPUTLBEntry tmptlb; \ > for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \ > if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\ > @@ -143,12 +143,13 @@ > > #ifndef SOFTMMU_CODE_ACCESS > static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, > - hwaddr physaddr, > + CPUIOTLBEntry *iotlbentry, > target_ulong addr, > uintptr_t retaddr) > { > uint64_t val; > CPUState *cpu = ENV_GET_CPU(env); > + hwaddr physaddr = iotlbentry->addr; > MemoryRegion *mr = iotlb_to_region(cpu, physaddr); > > physaddr = (physaddr & TARGET_PAGE_MASK) + addr; > @@ -195,15 +196,15 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, > > /* Handle an IO access. */ > if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { > - hwaddr ioaddr; > + CPUIOTLBEntry *iotlbentry; > if ((addr & (DATA_SIZE - 1)) != 0) { > goto do_unaligned_access; > } > - ioaddr = env->iotlb[mmu_idx][index]; > + iotlbentry = &env->iotlb[mmu_idx][index]; > > /* ??? Note that the io helpers always read data in the target > byte ordering. We should push the LE/BE request down into io. 
*/ > - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); > + res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr); > res = TGT_LE(res); > return res; > } > @@ -283,15 +284,15 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, > > /* Handle an IO access. */ > if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { > - hwaddr ioaddr; > + CPUIOTLBEntry *iotlbentry; > if ((addr & (DATA_SIZE - 1)) != 0) { > goto do_unaligned_access; > } > - ioaddr = env->iotlb[mmu_idx][index]; > + iotlbentry = &env->iotlb[mmu_idx][index]; > > /* ??? Note that the io helpers always read data in the target > byte ordering. We should push the LE/BE request down into io. */ > - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); > + res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr); > res = TGT_BE(res); > return res; > } > @@ -363,12 +364,13 @@ WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr, > #endif > > static inline void glue(io_write, SUFFIX)(CPUArchState *env, > - hwaddr physaddr, > + CPUIOTLBEntry *iotlbentry, > DATA_TYPE val, > target_ulong addr, > uintptr_t retaddr) > { > CPUState *cpu = ENV_GET_CPU(env); > + hwaddr physaddr = iotlbentry->addr; > MemoryRegion *mr = iotlb_to_region(cpu, physaddr); > > physaddr = (physaddr & TARGET_PAGE_MASK) + addr; > @@ -408,16 +410,16 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, > > /* Handle an IO access. */ > if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { > - hwaddr ioaddr; > + CPUIOTLBEntry *iotlbentry; > if ((addr & (DATA_SIZE - 1)) != 0) { > goto do_unaligned_access; > } > - ioaddr = env->iotlb[mmu_idx][index]; > + iotlbentry = &env->iotlb[mmu_idx][index]; > > /* ??? Note that the io helpers always read data in the target > byte ordering. We should push the LE/BE request down into io. 
*/ > val = TGT_LE(val); > - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); > + glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr); > return; > } > > @@ -489,16 +491,16 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, > > /* Handle an IO access. */ > if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { > - hwaddr ioaddr; > + CPUIOTLBEntry *iotlbentry; > if ((addr & (DATA_SIZE - 1)) != 0) { > goto do_unaligned_access; > } > - ioaddr = env->iotlb[mmu_idx][index]; > + iotlbentry = &env->iotlb[mmu_idx][index]; > > /* ??? Note that the io helpers always read data in the target > byte ordering. We should push the LE/BE request down into io. */ > val = TGT_BE(val); > - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); > + glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr); > return; > } > > Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
On Tue, Apr 07, 2015 at 09:09:49PM +0100, Peter Maydell wrote: > Make the CPU iotlb a structure rather than a plain hwaddr; > this will allow us to add transaction attributes to it. > > Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> > --- > cputlb.c | 4 ++-- > include/exec/cpu-defs.h | 13 +++++++++++-- > softmmu_template.h | 32 +++++++++++++++++--------------- > 3 files changed, 30 insertions(+), 19 deletions(-) > > diff --git a/cputlb.c b/cputlb.c > index 38f2151..5e1cb8f 100644 > --- a/cputlb.c > +++ b/cputlb.c > @@ -301,7 +301,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, > env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; > > /* refill the tlb */ > - env->iotlb[mmu_idx][index] = iotlb - vaddr; > + env->iotlb[mmu_idx][index].addr = iotlb - vaddr; > te->addend = addend - vaddr; > if (prot & PAGE_READ) { > te->addr_read = address; > @@ -349,7 +349,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) > (addr & TARGET_PAGE_MASK))) { > cpu_ldub_code(env1, addr); > } > - pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK; > + pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK; > mr = iotlb_to_region(cpu, pd); > if (memory_region_is_unassigned(mr)) { > CPUClass *cc = CPU_GET_CLASS(cpu); > diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h > index 0ca6f0b..7f88185 100644 > --- a/include/exec/cpu-defs.h > +++ b/include/exec/cpu-defs.h > @@ -102,12 +102,21 @@ typedef struct CPUTLBEntry { > > QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); > > +/* The IOTLB is not accessed directly inline by generated TCG code, > + * so the CPUIOTLBEntry layout is not as critical as that of the > + * CPUTLBEntry. (This is also why we don't want to combine the two > + * structs into one.) 
> + */ > +typedef struct CPUIOTLBEntry { > + hwaddr addr; > +} CPUIOTLBEntry; > + > #define CPU_COMMON_TLB \ > /* The meaning of the MMU modes is defined in the target code. */ \ > CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ > CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \ > - hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ > - hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ > + CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ > + CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ > target_ulong tlb_flush_addr; \ > target_ulong tlb_flush_mask; \ > target_ulong vtlb_index; \ > diff --git a/softmmu_template.h b/softmmu_template.h > index 4b9bae7..7a36550 100644 > --- a/softmmu_template.h > +++ b/softmmu_template.h > @@ -123,7 +123,7 @@ > * victim tlb. try to refill from the victim tlb before walking the \ > * page table. */ \ > int vidx; \ > - hwaddr tmpiotlb; \ > + CPUIOTLBEntry tmpiotlb; \ > CPUTLBEntry tmptlb; \ > for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \ > if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\ > @@ -143,12 +143,13 @@ > > #ifndef SOFTMMU_CODE_ACCESS > static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, > - hwaddr physaddr, > + CPUIOTLBEntry *iotlbentry, > target_ulong addr, > uintptr_t retaddr) > { > uint64_t val; > CPUState *cpu = ENV_GET_CPU(env); > + hwaddr physaddr = iotlbentry->addr; > MemoryRegion *mr = iotlb_to_region(cpu, physaddr); > > physaddr = (physaddr & TARGET_PAGE_MASK) + addr; > @@ -195,15 +196,15 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, > > /* Handle an IO access. */ > if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { > - hwaddr ioaddr; > + CPUIOTLBEntry *iotlbentry; > if ((addr & (DATA_SIZE - 1)) != 0) { > goto do_unaligned_access; > } > - ioaddr = env->iotlb[mmu_idx][index]; > + iotlbentry = &env->iotlb[mmu_idx][index]; > > /* ??? Note that the io helpers always read data in the target > byte ordering. 
We should push the LE/BE request down into io. */ > - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); > + res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr); > res = TGT_LE(res); > return res; > } > @@ -283,15 +284,15 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, > > /* Handle an IO access. */ > if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { > - hwaddr ioaddr; > + CPUIOTLBEntry *iotlbentry; > if ((addr & (DATA_SIZE - 1)) != 0) { > goto do_unaligned_access; > } > - ioaddr = env->iotlb[mmu_idx][index]; > + iotlbentry = &env->iotlb[mmu_idx][index]; > > /* ??? Note that the io helpers always read data in the target > byte ordering. We should push the LE/BE request down into io. */ > - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); > + res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr); > res = TGT_BE(res); > return res; > } > @@ -363,12 +364,13 @@ WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr, > #endif > > static inline void glue(io_write, SUFFIX)(CPUArchState *env, > - hwaddr physaddr, > + CPUIOTLBEntry *iotlbentry, > DATA_TYPE val, > target_ulong addr, > uintptr_t retaddr) > { > CPUState *cpu = ENV_GET_CPU(env); > + hwaddr physaddr = iotlbentry->addr; > MemoryRegion *mr = iotlb_to_region(cpu, physaddr); > > physaddr = (physaddr & TARGET_PAGE_MASK) + addr; > @@ -408,16 +410,16 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, > > /* Handle an IO access. */ > if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { > - hwaddr ioaddr; > + CPUIOTLBEntry *iotlbentry; > if ((addr & (DATA_SIZE - 1)) != 0) { > goto do_unaligned_access; > } > - ioaddr = env->iotlb[mmu_idx][index]; > + iotlbentry = &env->iotlb[mmu_idx][index]; > > /* ??? Note that the io helpers always read data in the target > byte ordering. We should push the LE/BE request down into io. 
*/ > val = TGT_LE(val); > - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); > + glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr); > return; > } > > @@ -489,16 +491,16 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, > > /* Handle an IO access. */ > if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { > - hwaddr ioaddr; > + CPUIOTLBEntry *iotlbentry; > if ((addr & (DATA_SIZE - 1)) != 0) { > goto do_unaligned_access; > } > - ioaddr = env->iotlb[mmu_idx][index]; > + iotlbentry = &env->iotlb[mmu_idx][index]; > > /* ??? Note that the io helpers always read data in the target > byte ordering. We should push the LE/BE request down into io. */ > val = TGT_BE(val); > - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); > + glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr); > return; > } > > -- > 1.9.1 >
diff --git a/cputlb.c b/cputlb.c index 38f2151..5e1cb8f 100644 --- a/cputlb.c +++ b/cputlb.c @@ -301,7 +301,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; /* refill the tlb */ - env->iotlb[mmu_idx][index] = iotlb - vaddr; + env->iotlb[mmu_idx][index].addr = iotlb - vaddr; te->addend = addend - vaddr; if (prot & PAGE_READ) { te->addr_read = address; @@ -349,7 +349,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) (addr & TARGET_PAGE_MASK))) { cpu_ldub_code(env1, addr); } - pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK; + pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK; mr = iotlb_to_region(cpu, pd); if (memory_region_is_unassigned(mr)) { CPUClass *cc = CPU_GET_CLASS(cpu); diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index 0ca6f0b..7f88185 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -102,12 +102,21 @@ typedef struct CPUTLBEntry { QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); +/* The IOTLB is not accessed directly inline by generated TCG code, + * so the CPUIOTLBEntry layout is not as critical as that of the + * CPUTLBEntry. (This is also why we don't want to combine the two + * structs into one.) + */ +typedef struct CPUIOTLBEntry { + hwaddr addr; +} CPUIOTLBEntry; + #define CPU_COMMON_TLB \ /* The meaning of the MMU modes is defined in the target code. 
*/ \ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \ - hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ - hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ + CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ + CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ target_ulong tlb_flush_addr; \ target_ulong tlb_flush_mask; \ target_ulong vtlb_index; \ diff --git a/softmmu_template.h b/softmmu_template.h index 4b9bae7..7a36550 100644 --- a/softmmu_template.h +++ b/softmmu_template.h @@ -123,7 +123,7 @@ * victim tlb. try to refill from the victim tlb before walking the \ * page table. */ \ int vidx; \ - hwaddr tmpiotlb; \ + CPUIOTLBEntry tmpiotlb; \ CPUTLBEntry tmptlb; \ for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \ if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\ @@ -143,12 +143,13 @@ #ifndef SOFTMMU_CODE_ACCESS static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, - hwaddr physaddr, + CPUIOTLBEntry *iotlbentry, target_ulong addr, uintptr_t retaddr) { uint64_t val; CPUState *cpu = ENV_GET_CPU(env); + hwaddr physaddr = iotlbentry->addr; MemoryRegion *mr = iotlb_to_region(cpu, physaddr); physaddr = (physaddr & TARGET_PAGE_MASK) + addr; @@ -195,15 +196,15 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, /* Handle an IO access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; + CPUIOTLBEntry *iotlbentry; if ((addr & (DATA_SIZE - 1)) != 0) { goto do_unaligned_access; } - ioaddr = env->iotlb[mmu_idx][index]; + iotlbentry = &env->iotlb[mmu_idx][index]; /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. 
*/ - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); + res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr); res = TGT_LE(res); return res; } @@ -283,15 +284,15 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, /* Handle an IO access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; + CPUIOTLBEntry *iotlbentry; if ((addr & (DATA_SIZE - 1)) != 0) { goto do_unaligned_access; } - ioaddr = env->iotlb[mmu_idx][index]; + iotlbentry = &env->iotlb[mmu_idx][index]; /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); + res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr); res = TGT_BE(res); return res; } @@ -363,12 +364,13 @@ WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr, #endif static inline void glue(io_write, SUFFIX)(CPUArchState *env, - hwaddr physaddr, + CPUIOTLBEntry *iotlbentry, DATA_TYPE val, target_ulong addr, uintptr_t retaddr) { CPUState *cpu = ENV_GET_CPU(env); + hwaddr physaddr = iotlbentry->addr; MemoryRegion *mr = iotlb_to_region(cpu, physaddr); physaddr = (physaddr & TARGET_PAGE_MASK) + addr; @@ -408,16 +410,16 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* Handle an IO access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; + CPUIOTLBEntry *iotlbentry; if ((addr & (DATA_SIZE - 1)) != 0) { goto do_unaligned_access; } - ioaddr = env->iotlb[mmu_idx][index]; + iotlbentry = &env->iotlb[mmu_idx][index]; /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. 
*/ val = TGT_LE(val); - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); + glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr); return; } @@ -489,16 +491,16 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* Handle an IO access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; + CPUIOTLBEntry *iotlbentry; if ((addr & (DATA_SIZE - 1)) != 0) { goto do_unaligned_access; } - ioaddr = env->iotlb[mmu_idx][index]; + iotlbentry = &env->iotlb[mmu_idx][index]; /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ val = TGT_BE(val); - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); + glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr); return; }
Make the CPU iotlb a structure rather than a plain hwaddr; this will allow us to add transaction attributes to it. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> --- cputlb.c | 4 ++-- include/exec/cpu-defs.h | 13 +++++++++++-- softmmu_template.h | 32 +++++++++++++++++--------------- 3 files changed, 30 insertions(+), 19 deletions(-)