From patchwork Fri Sep 21 14:08:09 2012
X-Patchwork-Submitter: Juan Quintela
X-Patchwork-Id: 185785
From: Juan Quintela
To: qemu-devel@nongnu.org
Date: Fri, 21 Sep 2012 16:08:09 +0200
Message-Id: <1348236500-2565-4-git-send-email-quintela@redhat.com>
In-Reply-To: <1348236500-2565-1-git-send-email-quintela@redhat.com>
References: <1348236500-2565-1-git-send-email-quintela@redhat.com>
Cc: Paolo Bonzini, Umesh Deshpande
Subject: [Qemu-devel] [PATCH 03/14] protect the ramlist with a separate mutex

From: Umesh Deshpande

Add a new mutex that protects the shared state between ram_save_live
and the iothread. If the iothread mutex has to be taken together with
the ramlist mutex, the iothread mutex shall always be taken _outside_.
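
A minimal sketch of that lock ordering (illustration only, not part of
the diff; qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() are
the existing iothread lock helpers, the ramlist helpers are introduced
below):

    qemu_mutex_lock_iothread();    /* outer lock: the iothread lock */
    qemu_mutex_lock_ramlist();     /* inner lock: the new ramlist mutex */

    /* ... access or modify ram_list.blocks here ... */

    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();

Taking the two locks in the opposite order in one thread, while another
thread follows the order above, could deadlock.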
Signed-off-by: Paolo Bonzini
Signed-off-by: Umesh Deshpande
Signed-off-by: Juan Quintela
---
 arch_init.c |  9 ++++++++-
 cpu-all.h   |  8 ++++++++
 exec.c      | 23 +++++++++++++++++++++--
 3 files changed, 37 insertions(+), 3 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index eb33fdd..0d963b4 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -553,7 +553,6 @@ static void ram_migration_cancel(void *opaque)
     migration_end();
 }
 
-
 static void reset_ram_globals(void)
 {
     last_block = NULL;
@@ -573,6 +572,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     bitmap_set(migration_bitmap, 1, ram_pages);
     migration_dirty_pages = ram_pages;
 
+    qemu_mutex_lock_ramlist();
     migration_bitmap_sync();
     bytes_transferred = 0;
     reset_ram_globals();
@@ -600,6 +600,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
         qemu_put_be64(f, block->length);
     }
 
+    qemu_mutex_unlock_ramlist();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
     return 0;
@@ -614,6 +615,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     uint64_t expected_downtime;
     MigrationState *s = migrate_get_current();
 
+    qemu_mutex_lock_ramlist();
+
     if (ram_list.version != last_version) {
         reset_ram_globals();
     }
@@ -662,6 +665,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         bwidth = 0.000001;
     }
 
+    qemu_mutex_unlock_ramlist();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
     expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
@@ -682,6 +686,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 {
     migration_bitmap_sync();
 
+    qemu_mutex_lock_ramlist();
+
     /* try transferring iterative blocks of memory */
 
     /* flush all remaining blocks regardless of rate limiting */
@@ -697,6 +703,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     }
 
     memory_global_dirty_log_stop();
+    qemu_mutex_unlock_ramlist();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
     g_free(migration_bitmap);
diff --git a/cpu-all.h b/cpu-all.h
index 6576229..2b0a640 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -22,6 +22,7 @@
 #include "qemu-common.h"
 #include "qemu-tls.h"
 #include "cpu-common.h"
+#include "qemu-thread.h"
 
 /* some important defines:
  *
@@ -487,7 +488,9 @@ typedef struct RAMBlock {
     ram_addr_t offset;
     ram_addr_t length;
     uint32_t flags;
+    /* Protected by the iothread lock.  */
     QLIST_ENTRY(RAMBlock) next_mru;
+    /* Protected by the ramlist lock.  */
     QLIST_ENTRY(RAMBlock) next;
     char idstr[256];
 #if defined(__linux__) && !defined(TARGET_S390X)
@@ -496,9 +499,12 @@ typedef struct RAMBlock {
 } RAMBlock;
 
 typedef struct RAMList {
+    QemuMutex mutex;
+    /* Protected by the iothread lock.  */
     uint8_t *phys_dirty;
     uint32_t version;
     QLIST_HEAD(, RAMBlock) blocks_mru;
+    /* Protected by the ramlist lock.  */
     QLIST_HEAD(, RAMBlock) blocks;
 } RAMList;
 extern RAMList ram_list;
@@ -520,6 +526,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 #endif /* !CONFIG_USER_ONLY */
 
 ram_addr_t last_ram_offset(void);
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
 
 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                         uint8_t *buf, int len, int is_write);
diff --git a/exec.c b/exec.c
index e9d1509..3a8a4dd 100644
--- a/exec.c
+++ b/exec.c
@@ -637,6 +637,7 @@ bool tcg_enabled(void)
 
 void cpu_exec_init_all(void)
 {
+    qemu_mutex_init(&ram_list.mutex);
 #if !defined(CONFIG_USER_ONLY)
     memory_map_init();
     io_mem_init();
@@ -2364,6 +2365,16 @@ static long gethugepagesize(const char *path)
     return fs.f_bsize;
 }
 
+void qemu_mutex_lock_ramlist(void)
+{
+    qemu_mutex_lock(&ram_list.mutex);
+}
+
+void qemu_mutex_unlock_ramlist(void)
+{
+    qemu_mutex_unlock(&ram_list.mutex);
+}
+
 static void *file_ram_alloc(RAMBlock *block,
                             ram_addr_t memory,
                             const char *path)
@@ -2519,6 +2530,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
     }
     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
 
+    qemu_mutex_lock_ramlist();
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
@@ -2526,6 +2538,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
             abort();
         }
     }
+    qemu_mutex_unlock_ramlist();
 }
 
 static int memory_try_enable_merging(void *addr, size_t len)
@@ -2549,6 +2562,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
 
     size = TARGET_PAGE_ALIGN(size);
     new_block = g_malloc0(sizeof(*new_block));
+    qemu_mutex_lock_ramlist();
     new_block->mr = mr;
     new_block->offset = find_ram_offset(size);
     if (host) {
@@ -2584,6 +2598,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
 
     QLIST_INSERT_HEAD(&ram_list.blocks_mru, new_block, next_mru);
     ram_list.version++;
+    qemu_mutex_unlock_ramlist();
 
     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                     last_ram_offset() >> TARGET_PAGE_BITS);
@@ -2608,21 +2623,24 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
 {
     RAMBlock *block;
 
+    qemu_mutex_lock_ramlist();
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
             QLIST_REMOVE(block, next);
             QLIST_REMOVE(block, next_mru);
             ram_list.version++;
             g_free(block);
-            return;
+            break;
         }
     }
+    qemu_mutex_unlock_ramlist();
 }
 
 void qemu_ram_free(ram_addr_t addr)
 {
     RAMBlock *block;
 
+    qemu_mutex_lock_ramlist();
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
             QLIST_REMOVE(block, next);
@@ -2653,9 +2671,10 @@ void qemu_ram_free(ram_addr_t addr)
 #endif
             }
             g_free(block);
-            return;
+            break;
         }
     }
+    qemu_mutex_unlock_ramlist();
 }
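
(Not part of the diff: a brief usage sketch. Code that walks
ram_list.blocks outside the iothread lock is expected to bracket the
traversal with the new helpers, following the pattern ram_save_setup()
uses above.)

    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* The walk is safe against concurrent qemu_ram_alloc_from_ptr()
         * and qemu_ram_free(), which now take the same lock.  */
    }
    qemu_mutex_unlock_ramlist();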