From patchwork Mon Oct 22 09:23:50 2012 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: pingfank@linux.vnet.ibm.com X-Patchwork-Id: 193183 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [208.118.235.17]) (using TLSv1 with cipher AES256-SHA (256/256 bits)) (Client did not present a certificate) by ozlabs.org (Postfix) with ESMTPS id 27F142C00F8 for ; Tue, 23 Oct 2012 02:31:49 +1100 (EST) Received: from localhost ([::1]:59776 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1TQJyj-0004Es-SP for incoming@patchwork.ozlabs.org; Mon, 22 Oct 2012 11:31:45 -0400 Received: from eggs.gnu.org ([208.118.235.92]:54799) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1TQEFW-0002ri-M4 for qemu-devel@nongnu.org; Mon, 22 Oct 2012 05:24:44 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1TQEFH-0006gM-MV for qemu-devel@nongnu.org; Mon, 22 Oct 2012 05:24:42 -0400 Received: from e28smtp04.in.ibm.com ([122.248.162.4]:39299) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1TQEFG-0006fW-P3 for qemu-devel@nongnu.org; Mon, 22 Oct 2012 05:24:27 -0400 Received: from /spool/local by e28smtp04.in.ibm.com with IBM ESMTP SMTP Gateway: Authorized Use Only! Violators will be prosecuted for from ; Mon, 22 Oct 2012 14:54:20 +0530 Received: from d28relay04.in.ibm.com (9.184.220.61) by e28smtp04.in.ibm.com (192.168.1.134) with IBM ESMTP SMTP Gateway: Authorized Use Only! 
Violators will be prosecuted; Mon, 22 Oct 2012 14:54:19 +0530 Received: from d28av01.in.ibm.com (d28av01.in.ibm.com [9.184.220.63]) by d28relay04.in.ibm.com (8.13.8/8.13.8/NCO v10.0) with ESMTP id q9M9OIiB41615432 for ; Mon, 22 Oct 2012 14:54:18 +0530 Received: from d28av01.in.ibm.com (loopback [127.0.0.1]) by d28av01.in.ibm.com (8.14.4/8.13.1/NCO v10.0 AVout) with ESMTP id q9MEs92Y028123 for ; Mon, 22 Oct 2012 14:54:10 GMT Received: from oc8440477808.cn.ibm.com (oc8440477808.cn.ibm.com [9.115.122.202]) by d28av01.in.ibm.com (8.14.4/8.13.1/NCO v10.0 AVin) with ESMTP id q9MErrUh026674; Mon, 22 Oct 2012 14:54:08 GMT From: Liu Ping Fan To: qemu-devel@nongnu.org Date: Mon, 22 Oct 2012 17:23:50 +0800 Message-Id: <1350897839-29593-8-git-send-email-pingfank@linux.vnet.ibm.com> X-Mailer: git-send-email 1.7.4.4 In-Reply-To: <1350897839-29593-1-git-send-email-pingfank@linux.vnet.ibm.com> References: <1350897839-29593-1-git-send-email-pingfank@linux.vnet.ibm.com> x-cbid: 12102209-5564-0000-0000-000004FB0B55 X-detected-operating-system: by eggs.gnu.org: Genre and OS details not recognized. X-Received-From: 122.248.162.4 X-Mailman-Approved-At: Mon, 22 Oct 2012 11:31:25 -0400 Cc: Stefan Hajnoczi , Marcelo Tosatti , Avi Kivity , Anthony Liguori , Jan Kiszka , Paolo Bonzini Subject: [Qemu-devel] [patch v4 07/16] memory: make mmio dispatch able to be out of biglock X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.14 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Sender: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Without the biglock, we try to protect the mr by increasing its refcnt. If we cannot increase the refcnt, we fall back and resort to the biglock. Another point is that the memory radix tree can be flushed by another thread, so we should take a copy of the terminal mr to survive such an issue. 
Signed-off-by: Liu Ping Fan --- exec.c | 125 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 117 insertions(+), 8 deletions(-) diff --git a/exec.c b/exec.c index 5834766..91b859b 100644 --- a/exec.c +++ b/exec.c @@ -200,6 +200,8 @@ struct PhysPageEntry { uint16_t ptr : 15; }; +static QemuMutex mem_map_lock; + /* Simple allocator for PhysPageEntry nodes */ static PhysPageEntry (*phys_map_nodes)[L2_SIZE]; static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc; @@ -212,6 +214,8 @@ static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 }; static void io_mem_init(void); static void memory_map_init(void); +static int phys_page_lookup(target_phys_addr_t addr, MemoryRegionSection *mrs); + static MemoryRegion io_mem_watch; #endif @@ -2245,6 +2249,7 @@ static void register_subpage(MemoryRegionSection *section) subpage_t *subpage; target_phys_addr_t base = section->offset_within_address_space & TARGET_PAGE_MASK; + /* Already under the protection of mem_map_lock */ MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS); MemoryRegionSection subsection = { .offset_within_address_space = base, @@ -3165,6 +3170,8 @@ static void io_mem_init(void) static void core_begin(MemoryListener *listener) { + /* protect the updating process of mrs in memory core agaist readers */ + qemu_mutex_lock(&mem_map_lock); destroy_all_mappings(); phys_sections_clear(); phys_map.ptr = PHYS_MAP_NODE_NIL; @@ -3184,17 +3191,32 @@ static void core_commit(MemoryListener *listener) for(env = first_cpu; env != NULL; env = env->next_cpu) { tlb_flush(env, 1); } + qemu_mutex_unlock(&mem_map_lock); } static void core_region_add(MemoryListener *listener, MemoryRegionSection *section) { + MemoryRegion *mr = section->mr; + + if (mr->ops) { + if (mr->ops->ref) { + mr->ops->ref(mr); + } + } cpu_register_physical_memory_log(section, section->readonly); } static void core_region_del(MemoryListener *listener, MemoryRegionSection *section) { + 
MemoryRegion *mr = section->mr; + + if (mr->ops) { + if (mr->ops->unref) { + mr->ops->unref(mr); + } + } } static void core_region_nop(MemoryListener *listener, @@ -3348,6 +3370,8 @@ static void memory_map_init(void) memory_region_init(system_io, "io", 65536); set_system_io_map(system_io); + qemu_mutex_init(&mem_map_lock); + memory_listener_register(&core_memory_listener, system_memory); memory_listener_register(&io_memory_listener, system_io); } @@ -3406,6 +3430,58 @@ int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, } #else + +static MemoryRegionSection *subpage_get_terminal(subpage_t *mmio, + target_phys_addr_t addr) +{ + MemoryRegionSection *section; + unsigned int idx = SUBPAGE_IDX(addr); + + section = &phys_sections[mmio->sub_section[idx]]; + return section; +} + +static int memory_region_section_ref(MemoryRegionSection *mrs) +{ + MemoryRegion *mr; + int ret = 0; + + mr = mrs->mr; + if (mr->ops) { + if (mr->ops->ref) { + ret = mr->ops->ref(mr); + } + } + return ret; +} + +static void memory_region_section_unref(MemoryRegionSection *mrs) +{ + MemoryRegion *mr; + + mr = mrs->mr; + if (mr->ops) { + if (mr->ops->unref) { + mr->ops->unref(mr); + } + } +} + +static int phys_page_lookup(target_phys_addr_t addr, MemoryRegionSection *mrs) +{ + MemoryRegionSection *section; + int ret; + + section = phys_page_find(addr >> TARGET_PAGE_BITS); + if (section->mr->subpage) { + section = subpage_get_terminal(section->mr->opaque, addr); + } + *mrs = *section; + ret = memory_region_section_ref(mrs); + + return ret; +} + void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, int len, int is_write) { @@ -3413,14 +3489,28 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, uint8_t *ptr; uint32_t val; target_phys_addr_t page; - MemoryRegionSection *section; + MemoryRegionSection *section, obj_mrs; + int safe_ref; while (len > 0) { page = addr & TARGET_PAGE_MASK; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; - section = 
phys_page_find(page >> TARGET_PAGE_BITS); + qemu_mutex_lock(&mem_map_lock); + safe_ref = phys_page_lookup(page, &obj_mrs); + qemu_mutex_unlock(&mem_map_lock); + if (safe_ref == 0) { + qemu_mutex_lock_iothread(); + qemu_mutex_lock(&mem_map_lock); + /* At the 2nd try, mem map can change, so need to judge it again */ + safe_ref = phys_page_lookup(page, &obj_mrs); + qemu_mutex_unlock(&mem_map_lock); + if (safe_ref > 0) { + qemu_mutex_unlock_iothread(); + } + } + section = &obj_mrs; if (is_write) { if (!memory_region_is_ram(section->mr)) { @@ -3491,10 +3581,16 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, qemu_put_ram_ptr(ptr); } } + + memory_region_section_unref(&obj_mrs); len -= l; buf += l; addr += l; + if (safe_ref == 0) { + qemu_mutex_unlock_iothread(); + } } + } /* used for ROM loading : can write in RAM and ROM */ @@ -3504,14 +3600,18 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr, int l; uint8_t *ptr; target_phys_addr_t page; - MemoryRegionSection *section; + MemoryRegionSection *section, mr_obj; while (len > 0) { page = addr & TARGET_PAGE_MASK; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; - section = phys_page_find(page >> TARGET_PAGE_BITS); + + qemu_mutex_lock(&mem_map_lock); + phys_page_lookup(page, &mr_obj); + qemu_mutex_unlock(&mem_map_lock); + section = &mr_obj; if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { @@ -3528,6 +3628,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr, len -= l; buf += l; addr += l; + memory_region_section_unref(&mr_obj); } } @@ -3592,7 +3693,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr, target_phys_addr_t todo = 0; int l; target_phys_addr_t page; - MemoryRegionSection *section; + MemoryRegionSection *section, mr_obj; ram_addr_t raddr = RAM_ADDR_MAX; ram_addr_t rlen; void *ret; @@ -3602,7 +3703,10 @@ void *cpu_physical_memory_map(target_phys_addr_t addr, l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; - 
section = phys_page_find(page >> TARGET_PAGE_BITS); + qemu_mutex_lock(&mem_map_lock); + phys_page_lookup(page, &mr_obj); + qemu_mutex_unlock(&mem_map_lock); + section = &mr_obj; if (!(memory_region_is_ram(section->mr) && !section->readonly)) { if (todo || bounce.buffer) { @@ -3616,6 +3720,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr, } *plen = l; + memory_region_section_unref(&mr_obj); return bounce.buffer; } if (!todo) { @@ -3630,6 +3735,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr, rlen = todo; ret = qemu_ram_ptr_length(raddr, &rlen); *plen = rlen; + memory_region_section_unref(&mr_obj); return ret; } @@ -4239,9 +4345,12 @@ bool virtio_is_big_endian(void) #ifndef CONFIG_USER_ONLY bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr) { - MemoryRegionSection *section; + MemoryRegionSection *section, mr_obj; - section = phys_page_find(phys_addr >> TARGET_PAGE_BITS); + qemu_mutex_lock(&mem_map_lock); + phys_page_lookup(phys_addr, &mr_obj); + qemu_mutex_unlock(&mem_map_lock); + section = &mr_obj; return !(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr));