--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -606,6 +606,83 @@ glue(glue(helper_cmpxchg, SUFFIX),
return atomic_cmpxchg((DATA_TYPE *)haddr, old, new);
}
+#define GEN_ATOMIC_HELPER(NAME) \
+DATA_TYPE \
+glue(glue(glue(helper_atomic_, NAME), SUFFIX), \
+ MMUSUFFIX)(CPUArchState *env, target_ulong addr, DATA_TYPE val, \
+ TCGMemOpIdx oi, uintptr_t retaddr) \
+{ \
+ unsigned mmu_idx = get_mmuidx(oi); \
+ int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); \
+ target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; \
+ uintptr_t haddr; \
+ \
+ /* Adjust the given return address. */ \
+ retaddr -= GETPC_ADJ; \
+ \
+ /* If the TLB entry is for a different page, reload and try again */\
+ if ((addr & TARGET_PAGE_MASK) \
+ != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { \
+ if (unlikely((addr & (DATA_SIZE - 1)) != 0 \
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN)) { \
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,\
+ mmu_idx, retaddr); \
+ } \
+ if (!VICTIM_TLB_HIT(addr_write)) { \
+ tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, \
+ retaddr); \
+ } \
+ tlb_addr = env->tlb_table[mmu_idx][index].addr_write; \
+ } \
+ \
+ /* Handle an IO access. */ \
+ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { \
+        /* XXX: atomic accesses to I/O memory are not supported yet */ \
+ abort(); \
+ } \
+ \
+    /* Handle an access that spans two pages.  */                      \
+    if (DATA_SIZE > 1                                                  \
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1         \
+                    >= TARGET_PAGE_SIZE)) {                            \
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {                  \
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, \
+                                 mmu_idx, retaddr);                    \
+        }                                                              \
+        /* XXX: cannot perform a page-spanning access atomically */    \
+        abort();                                                       \
+    }                                                                  \
+ \
+ /* Handle aligned access or unaligned access in the same page. */ \
+ if (unlikely((addr & (DATA_SIZE - 1)) != 0 \
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN)) { \
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, \
+ mmu_idx, retaddr); \
+ } \
+ /* \
+ * If the host allows unaligned accesses, then let the compiler \
+ * do its thing when performing the access on the host. \
+ */ \
+ haddr = addr + env->tlb_table[mmu_idx][index].addend; \
+ return glue(atomic_, NAME)((DATA_TYPE *)haddr, val); \
+}
+
+GEN_ATOMIC_HELPER(fetch_add)
+GEN_ATOMIC_HELPER(fetch_sub)
+GEN_ATOMIC_HELPER(fetch_and)
+GEN_ATOMIC_HELPER(fetch_or)
+GEN_ATOMIC_HELPER(fetch_xor)
+
+GEN_ATOMIC_HELPER(add_fetch)
+GEN_ATOMIC_HELPER(sub_fetch)
+GEN_ATOMIC_HELPER(and_fetch)
+GEN_ATOMIC_HELPER(or_fetch)
+GEN_ATOMIC_HELPER(xor_fetch)
+
+GEN_ATOMIC_HELPER(xchg)
+
+#undef GEN_ATOMIC_HELPER
+
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
#undef READ_ACCESS_TYPE
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -1122,6 +1122,36 @@ uint64_t helper_cmpxchgq_mmu(CPUArchState *env, target_ulong addr,
uint64_t old, uint64_t new,
TCGMemOpIdx oi, uintptr_t retaddr);
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
+TYPE glue(glue(glue(helper_atomic_, \
+ NAME), \
+ SUFFIX), \
+ _mmu)(CPUArchState *env, target_ulong addr, TYPE val, \
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint8_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint16_t, w) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q)
+
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
#endif /* CONFIG_SOFTMMU */
#endif /* TCG_H */
Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 softmmu_template.h | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 tcg/tcg.h          | 30 ++++++++++++++++++++++
 2 files changed, 107 insertions(+)
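
For reviewers, a sketch (not part of the patch) of what one instantiation
expands to, assuming softmmu_template.h is included with DATA_SIZE == 4
(so DATA_TYPE == uint32_t and SUFFIX == l) and with MMUSUFFIX == _mmu:

    /* Expansion of GEN_ATOMIC_HELPER(fetch_add) for the 32-bit case;
     * this is the same prototype that GEN_ATOMIC_HELPER_ALL(fetch_add)
     * declares via its uint32_t/"l" row in tcg/tcg.h above.
     */
    uint32_t helper_atomic_fetch_addl_mmu(CPUArchState *env,
                                          target_ulong addr, uint32_t val,
                                          TCGMemOpIdx oi, uintptr_t retaddr);

A TCG backend would be expected to call this like the existing softmmu
store helpers, passing the return address into the generated code as
retaddr (the helper subtracts GETPC_ADJ itself, as above) and packing the
mmu index and memop into oi via make_memop_idx as for the ld/st helpers.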