@@ -28,6 +28,12 @@ extern uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
+extern uint32_t glue(address_space_cmpxchgl_notdirty, SUFFIX)(ARG1_DECL,
+    hwaddr addr, uint32_t old, uint32_t new, MemTxAttrs attrs,
+    MemTxResult *result);
+/* Returns the previous 64-bit value, so the return type must be
+ * uint64_t, not uint32_t, or the high half of the old PTE is lost.
+ */
+extern uint64_t glue(address_space_cmpxchgq_notdirty, SUFFIX)(ARG1_DECL,
+    hwaddr addr, uint64_t old, uint64_t new, MemTxAttrs attrs,
+    MemTxResult *result);
extern void glue(address_space_stw, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stl, SUFFIX)(ARG1_DECL,
@@ -320,6 +320,84 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
RCU_READ_UNLOCK();
}
+/* This is meant to be used for atomic PTE updates under MT-TCG.
+ *
+ * Atomically replaces the 32-bit word at @addr with @new if it currently
+ * equals @old, and returns the value previously in memory (== @old on
+ * success).  *@result is MEMTX_ERROR when the word is not backed by
+ * directly-accessible RAM (MMIO, or a mapping shorter than 4 bytes),
+ * in which case memory is left untouched.
+ *
+ * warning: addr must be aligned
+ */
+uint32_t glue(address_space_cmpxchgl_notdirty, SUFFIX)(ARG1_DECL,
+    hwaddr addr, uint32_t old, uint32_t new, MemTxAttrs attrs, MemTxResult *result)
+{
+    uint8_t *ptr;
+    MemoryRegion *mr;
+    hwaddr l = 4;
+    hwaddr addr1;
+    MemTxResult r;
+    uint8_t dirty_log_mask;
+
+    /* Must test result */
+    assert(result);
+
+    RCU_READ_LOCK();
+    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
+    if (l < 4 || !memory_access_is_direct(mr, true)) {
+        /* A cmpxchg cannot be emulated atomically via MMIO dispatch */
+        r = MEMTX_ERROR;
+    } else {
+        uint32_t orig = old;
+
+        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+        /* atomic_cmpxchg() derives the access width from the pointee
+         * type; through a uint8_t * it would only cmpxchg one byte,
+         * so the cast to uint32_t * is required for a 4-byte CAS.
+         */
+        old = atomic_cmpxchg((uint32_t *)ptr, orig, new);
+
+        if (old == orig) {
+            /* Only dirty the page when the store actually happened.
+             * NOTE(review): this uses addr rather than addr1, matching
+             * the existing stl_notdirty code above — confirm that is
+             * intended when the region's offset is non-zero.
+             */
+            dirty_log_mask = memory_region_get_dirty_log_mask(mr);
+            dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
+            cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
+                                                4, dirty_log_mask);
+        }
+        r = MEMTX_OK;
+    }
+    *result = r;
+    RCU_READ_UNLOCK();
+
+    return old;
+}
+
+#ifdef CONFIG_ATOMIC64
+/* This is meant to be used for atomic PTE updates under MT-TCG.
+ *
+ * 64-bit variant: atomically replaces the quadword at @addr with @new
+ * if it currently equals @old, and returns the value previously in
+ * memory (== @old on success).  *@result is MEMTX_ERROR when the
+ * quadword is not backed by directly-accessible RAM, in which case
+ * memory is left untouched.
+ *
+ * warning: addr must be aligned
+ */
+uint64_t glue(address_space_cmpxchgq_notdirty, SUFFIX)(ARG1_DECL,
+    hwaddr addr, uint64_t old, uint64_t new, MemTxAttrs attrs, MemTxResult *result)
+{
+    uint8_t *ptr;
+    MemoryRegion *mr;
+    hwaddr l = 8;
+    hwaddr addr1;
+    MemTxResult r;
+    uint8_t dirty_log_mask;
+
+    /* Must test result */
+    assert(result);
+
+    RCU_READ_LOCK();
+    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
+    if (l < 8 || !memory_access_is_direct(mr, true)) {
+        /* A cmpxchg cannot be emulated atomically via MMIO dispatch */
+        r = MEMTX_ERROR;
+    } else {
+        /* The comparand must stay 64-bit: a uint32_t here would
+         * truncate @old, making the CAS compare against the wrong
+         * value and the success test below bogus for any PTE with
+         * bits set above bit 31.
+         */
+        uint64_t orig = old;
+
+        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+        /* atomic_cmpxchg() derives the access width from the pointee
+         * type; the cast to uint64_t * is required for an 8-byte CAS.
+         */
+        old = atomic_cmpxchg((uint64_t *)ptr, orig, new);
+
+        if (old == orig) {
+            /* Only dirty the page when the store actually happened.
+             * NOTE(review): uses addr rather than addr1, matching the
+             * existing stl_notdirty code — confirm that is intended.
+             */
+            dirty_log_mask = memory_region_get_dirty_log_mask(mr);
+            dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
+            cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
+                                                8, dirty_log_mask);
+        }
+        r = MEMTX_OK;
+    }
+    *result = r;
+    RCU_READ_UNLOCK();
+
+    return old;
+}
+#endif /* CONFIG_ATOMIC64 */
+
/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs,
On some architectures, PTE updates for dirty and changed bits need to be performed atomically. This adds a couple of address_space_cmpxchg* helpers for that purpose. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> --- include/exec/memory_ldst.inc.h | 6 +++ memory_ldst.inc.c | 78 ++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+)