@@ -1165,7 +1165,7 @@ void qemu_mutex_lock_iothread(void)
/* In the simple case there is no need to bump the VCPU thread out of
* TCG code execution.
*/
- if (!tcg_enabled() || qemu_in_vcpu_thread() ||
+ if (!tcg_any_enabled() || qemu_in_vcpu_thread() ||
!first_cpu || !first_cpu->thread) {
qemu_mutex_lock(&qemu_global_mutex);
atomic_dec(&iothread_requesting_mutex);
@@ -926,7 +926,7 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
page, end - page);
- if (dirty && tcg_enabled()) {
+ if (dirty && tcg_any_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
@@ -2592,7 +2592,7 @@ void cpu_flush_icache_range(hwaddr start, int len)
* so there is no need to flush anything. For KVM / Xen we need to flush
* the host's instruction cache at least.
*/
- if (tcg_enabled()) {
+ if (tcg_all_enabled()) {
return;
}
@@ -164,14 +164,15 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
- if (tcg_enabled()) {
+ if (tcg_any_enabled()) {
atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
}
}
}
xen_modified_memory(start, pages << TARGET_PAGE_BITS);
} else {
- uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
+ uint8_t clients = tcg_any_enabled() ? DIRTY_CLIENTS_ALL
+ : DIRTY_CLIENTS_NOCODE;
/*
* bitmap-traveling is faster than memory-traveling (for addr...)
* especially when most of the memory is not dirty.
@@ -1198,7 +1198,7 @@ void memory_region_init_ram(MemoryRegion *mr,
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_addr = qemu_ram_alloc(size, mr, errp);
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ mr->dirty_log_mask = tcg_any_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
void memory_region_init_resizeable_ram(MemoryRegion *mr,
@@ -1216,7 +1216,7 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_addr = qemu_ram_alloc_resizeable(size, max_size, resized, mr, errp);
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ mr->dirty_log_mask = tcg_any_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#ifdef __linux__
@@ -1233,7 +1233,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_addr = qemu_ram_alloc_from_file(size, mr, share, path, errp);
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ mr->dirty_log_mask = tcg_any_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif
@@ -1247,7 +1247,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram_from_ptr;
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ mr->dirty_log_mask = tcg_any_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
/* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
assert(ptr != NULL);
Convert core code usages of tcg_enabled() which don't have a specific CPU associated with, to either tcg_any_enabled() or tcg_all_enabled(). This is to prepare support for multiple tcg engines, where queries must query a specific CPU or use global any/all logic. Signed-off-by: Peter Crosthwaite <crosthwaite.peter@gmail.com> --- Changed since RFCv3: Tweak commit subject (too long) cpus.c | 2 +- exec.c | 4 ++-- include/exec/ram_addr.h | 5 +++-- memory.c | 8 ++++---- 4 files changed, 10 insertions(+), 9 deletions(-)