diff mbox

[V3,06/11] memory: make mmio dispatch able to be out of biglock

Message ID 1347349912-15611-7-git-send-email-qemulist@gmail.com
State New
Headers show

Commit Message

pingfan liu Sept. 11, 2012, 7:51 a.m. UTC
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>

Without the biglock, we try to protect the mr by increasing its refcnt.
If we can not increase the refcnt, we go back and resort to the biglock.

Another point is that the memory radix-tree can be flushed by another
thread, so we should take a copy of the terminal mr to survive
such an issue.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 exec.c |   95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 94 insertions(+), 1 deletions(-)

Comments

Avi Kivity Sept. 11, 2012, 8:47 a.m. UTC | #1
On 09/11/2012 10:51 AM, Liu Ping Fan wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> 
> Without biglock, we try to protect the mr by increase refcnt.
> If we can inc refcnt, go backward and resort to biglock.
> 
> Another point is memory radix-tree can be flushed by another
> thread, so we should get the copy of terminal mr to survive
> from such issue.
> 
>  void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
>                              int len, int is_write)
>  {
> @@ -3413,14 +3479,35 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
>      uint8_t *ptr;
>      uint32_t val;
>      target_phys_addr_t page;
> -    MemoryRegionSection *section;
> +    MemoryRegionSection *section, obj_mrs;
> +    int ret;
> +    int need_biglock = 0;
>  
> +    /* Will finally removed after all mr->ops implement ref/unref() */
> +try_big_lock:
> +    if (need_biglock == 1) {
> +        qemu_mutex_lock_iothread();
> +    }
>      while (len > 0) {
>          page = addr & TARGET_PAGE_MASK;
>          l = (page + TARGET_PAGE_SIZE) - addr;
>          if (l > len)
>              l = len;
> +
> +        qemu_mutex_lock(&mem_map_lock);
>          section = phys_page_find(page >> TARGET_PAGE_BITS);
> +        if (need_biglock == 0) {
> +            obj_mrs = mrs_get_terminal(section, addr);
> +            ret = mrs_ref(&obj_mrs);
> +            if (ret <= 0) {
> +                need_biglock = 1;
> +                qemu_mutex_unlock(&mem_map_lock);
> +                goto try_big_lock;
> +            }
> +            /* rely on local variable */
> +            section = &obj_mrs;
> +        }
> +        qemu_mutex_unlock(&mem_map_lock);
>  

If on the first pass we find that we need the big lock, we may find on
the second pass that we don't need it since the memory map has changed.
 So on the second pass we might actually need to drop the big lock.

Could code it like

  section, need_lock = lookup(...)
  if need_lock:
     lock(big_lock)
     section, need_lock = lookup(...)
     if not need_lock:
        unlock(big_lock)
  dispatch(section)
  if need_lock:
     unlock(big_lock)

It's ugly as hell, so we'll have to apologize to readers in a big comment
explaining what's going on.
diff mbox

Patch

diff --git a/exec.c b/exec.c
index 5834766..93205b1 100644
--- a/exec.c
+++ b/exec.c
@@ -3163,8 +3163,11 @@  static void io_mem_init(void)
                           "watch", UINT64_MAX);
 }
 
+static QemuMutex mem_map_lock;
+
 static void core_begin(MemoryListener *listener)
 {
+    qemu_mutex_lock(&mem_map_lock);
     destroy_all_mappings();
     phys_sections_clear();
     phys_map.ptr = PHYS_MAP_NODE_NIL;
@@ -3184,17 +3187,32 @@  static void core_commit(MemoryListener *listener)
     for(env = first_cpu; env != NULL; env = env->next_cpu) {
         tlb_flush(env, 1);
     }
+    qemu_mutex_unlock(&mem_map_lock);
 }
 
 static void core_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
 {
+    MemoryRegion *mr = section->mr;
+
+    if (mr->ops != NULL) {
+        if (mr->ops->ref != NULL) {
+            mr->ops->ref(mr);
+        }
+    }
     cpu_register_physical_memory_log(section, section->readonly);
 }
 
 static void core_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
 {
+    MemoryRegion *mr = section->mr;
+
+    if (mr->ops != NULL) {
+        if (mr->ops->unref != NULL) {
+            mr->ops->unref(mr);
+        }
+    }
 }
 
 static void core_region_nop(MemoryListener *listener,
@@ -3348,6 +3366,8 @@  static void memory_map_init(void)
     memory_region_init(system_io, "io", 65536);
     set_system_io_map(system_io);
 
+    qemu_mutex_init(&mem_map_lock);
+
     memory_listener_register(&core_memory_listener, system_memory);
     memory_listener_register(&io_memory_listener, system_io);
 }
@@ -3406,6 +3426,52 @@  int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
 }
 
 #else
+
+static MemoryRegionSection *subpage_get_terminal(subpage_t *mmio,
+    target_phys_addr_t addr)
+{
+    MemoryRegionSection *section;
+    unsigned int idx = SUBPAGE_IDX(addr);
+
+    section = &phys_sections[mmio->sub_section[idx]];
+    return section;
+}
+
+static MemoryRegionSection mrs_get_terminal(MemoryRegionSection *mrs,
+    target_phys_addr_t addr)
+{
+    if (mrs->mr->subpage) {
+        mrs = subpage_get_terminal(mrs->mr->opaque, addr);
+    }
+    return *mrs;
+}
+
+static int mrs_ref(MemoryRegionSection *mrs)
+{
+    MemoryRegion *mr;
+    int ret;
+
+    mr = mrs->mr;
+    if (mr->ops != NULL) {
+        if (mr->ops->ref != NULL) {
+            ret = mr->ops->ref(mr);
+        }
+    }
+    return ret;
+}
+
+static void mrs_unref(MemoryRegionSection *mrs)
+{
+    MemoryRegion *mr;
+
+    mr = mrs->mr;
+    if (mr->ops != NULL) {
+        if (mr->ops->unref != NULL) {
+            mr->ops->unref(mr);
+        }
+    }
+}
+
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                             int len, int is_write)
 {
@@ -3413,14 +3479,35 @@  void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
     uint8_t *ptr;
     uint32_t val;
     target_phys_addr_t page;
-    MemoryRegionSection *section;
+    MemoryRegionSection *section, obj_mrs;
+    int ret;
+    int need_biglock = 0;
 
+    /* Will finally removed after all mr->ops implement ref/unref() */
+try_big_lock:
+    if (need_biglock == 1) {
+        qemu_mutex_lock_iothread();
+    }
     while (len > 0) {
         page = addr & TARGET_PAGE_MASK;
         l = (page + TARGET_PAGE_SIZE) - addr;
         if (l > len)
             l = len;
+
+        qemu_mutex_lock(&mem_map_lock);
         section = phys_page_find(page >> TARGET_PAGE_BITS);
+        if (need_biglock == 0) {
+            obj_mrs = mrs_get_terminal(section, addr);
+            ret = mrs_ref(&obj_mrs);
+            if (ret <= 0) {
+                need_biglock = 1;
+                qemu_mutex_unlock(&mem_map_lock);
+                goto try_big_lock;
+            }
+            /* rely on local variable */
+            section = &obj_mrs;
+        }
+        qemu_mutex_unlock(&mem_map_lock);
 
         if (is_write) {
             if (!memory_region_is_ram(section->mr)) {
@@ -3491,10 +3578,16 @@  void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                 qemu_put_ram_ptr(ptr);
             }
         }
+
+        mrs_unref(&obj_mrs);
         len -= l;
         buf += l;
         addr += l;
     }
+
+    if (need_biglock == 1) {
+        qemu_mutex_unlock_iothread();
+    }
 }
 
 /* used for ROM loading : can write in RAM and ROM */