
[v6,7/8] memory: introduce tls context to trace nested mmio request issue

Message ID 1352093924-17598-8-git-send-email-qemulist@gmail.com
State New

Commit Message

pingfan liu Nov. 5, 2012, 5:38 a.m. UTC
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>

After breaking down the big lock, a nested MMIO request that does not
target RAM can cause a deadlock. Consider this scenario: dev_a and dev_b
have fine-grained locks lockA and lockB; an ABBA deadlock can then be
triggered. We fix this by tracing such requests and rejecting them.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 exec.c        |   47 +++++++++++++++++++++++++++++++++++++++++++++++
 qemu-thread.h |    7 +++++++
 2 files changed, 54 insertions(+), 0 deletions(-)
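
For illustration, the ABBA scenario described above can be sketched as two
device models whose MMIO write handlers poke each other. The device names,
addresses and locks below are hypothetical and not part of this patch; only
the locking pattern matters:

#define DEV_A_MMIO_BASE 0xfe000000   /* hypothetical MMIO addresses */
#define DEV_B_MMIO_BASE 0xfe001000

static QemuMutex lockA, lockB;       /* per-device fine-grained locks */

/* dev_a's MemoryRegionOps write handler takes its own lock ... */
static void dev_a_mmio_write(void *opaque, target_phys_addr_t addr,
                             uint64_t val, unsigned size)
{
    qemu_mutex_lock(&lockA);
    /* ... and issues a nested access to dev_b, which needs lockB */
    address_space_rw(&address_space_memory, DEV_B_MMIO_BASE,
                     (uint8_t *)&val, size, true);
    qemu_mutex_unlock(&lockA);
}

/* dev_b's handler does the mirror-image access back to dev_a */
static void dev_b_mmio_write(void *opaque, target_phys_addr_t addr,
                             uint64_t val, unsigned size)
{
    qemu_mutex_lock(&lockB);
    address_space_rw(&address_space_memory, DEV_A_MMIO_BASE,
                     (uint8_t *)&val, size, true);
    qemu_mutex_unlock(&lockB);
}

/* If vcpu0 enters dev_a_mmio_write() while vcpu1 enters dev_b_mmio_write(),
 * each thread holds its own lock and blocks on the other's: lockA->lockB on
 * one side, lockB->lockA on the other, i.e. a classic ABBA deadlock. */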

Comments

Jan Kiszka Nov. 5, 2012, 6:57 a.m. UTC | #1
On 2012-11-05 06:38, Liu Ping Fan wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> 
> After breaking down the big lock, a nested MMIO request that does not
> target RAM can cause a deadlock. Consider this scenario: dev_a and dev_b
> have fine-grained locks lockA and lockB; an ABBA deadlock can then be
> triggered. We fix this by tracing such requests and rejecting them.
> 
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
>  exec.c        |   47 +++++++++++++++++++++++++++++++++++++++++++++++
>  qemu-thread.h |    7 +++++++
>  2 files changed, 54 insertions(+), 0 deletions(-)
> 
> diff --git a/exec.c b/exec.c
> index fa34ef9..1eb920d 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -3442,6 +3442,48 @@ static bool address_space_section_lookup_ref(AddressSpace *as,
>      return safe_ref;
>  }
>  
> +typedef struct ThreadContext {
> +  DispatchType dispatch_type;
> +  unsigned int mmio_req_pending;
> +} ThreadContext;
> +
> +static __thread ThreadContext thread_context = {
          ^^^^^^^^
Again, you will have to work on qemu-tls.h and then use DEFINE_TLS. The
above is not portable.
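
(For reference, a rough sketch of what that could look like. This assumes
qemu-tls.h ends up providing DEFINE_TLS()/DECLARE_TLS() plus a tls_var()
accessor; tls_var() is an assumption here, the exact interface is up to
that header:)

/* sketch only -- exact macro names depend on what qemu-tls.h provides */
#include "qemu-tls.h"

DEFINE_TLS(ThreadContext, thread_context);  /* zero-init == DISPATCH_INIT */

void qemu_thread_set_dispatch_type(DispatchType type)
{
    tls_var(thread_context).dispatch_type = type;
}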

> +    .dispatch_type = DISPATCH_INIT,
> +    .mmio_req_pending = 0
> +};
> +
> +void qemu_thread_set_dispatch_type(DispatchType type)
> +{
> +    thread_context.dispatch_type = type;
> +}
> +
> +void qemu_thread_reset_dispatch_type(void)
> +{
> +    thread_context.dispatch_type = DISPATCH_INIT;
> +}
> +
> +static void address_space_check_inc_req_pending(MemoryRegionSection *section)
> +{
> +    bool nested = false;
> +
> +    /* currently, only mmio out of big lock, and need this to avoid dead lock */
> +    if (thread_context.dispatch_type == DISPATCH_MMIO) {
> +        nested = ++thread_context.mmio_req_pending > 1 ? true : false;
> +        /* To fix, will filter iommu case */
> +        if (nested && !memory_region_is_ram(section->mr)) {
> +            fprintf(stderr, "mmio: nested target not RAM is not support");
> +            abort();
> +        }
> +    }

This should already take PIO into account, thus covering all scenarios:
if we are dispatching MMIO or PIO, reject any further requests that are
not targeting RAM.

I don't think we need mmio_req_pending for this. We are not interested
in differentiating between MMIO and PIO; both are problematic. We just
store the information that a request is in flight in the TLS variable
here, not before entering cpu_physical_memory_xxx. And then we can
simply bail out if another non-RAM request arrives; the nesting level
will never be >1.

And by bailing out I mean warn once + ignore the request, not abort().
That would be a needless guest-triggerable VM termination.

Jan
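
To make the suggested simplification concrete, a rough sketch of that
approach follows: a single per-thread flag, warn once, and drop the nested
non-RAM request. All names here are illustrative only, and a real patch
would use the qemu-tls.h machinery rather than a bare __thread variable:

/* one per-thread flag replaces dispatch_type + mmio_req_pending */
static __thread bool dispatching_dev_req;

/* returns false if the access must be dropped */
static bool address_space_check_nested(MemoryRegionSection *section)
{
    static bool warned;

    if (dispatching_dev_req && !memory_region_is_ram(section->mr)) {
        if (!warned) {
            fprintf(stderr, "nested non-RAM access from a device handler "
                    "is not supported, ignoring\n");
            warned = true;
        }
        return false;
    }
    return true;
}

/* around the device access in address_space_rw() and the PIO path:
 *
 *     if (!address_space_check_nested(&obj_mrs)) {
 *         goto next;                    // ignore, do not abort()
 *     }
 *     dispatching_dev_req = true;
 *     ... invoke the MMIO/PIO handler ...
 *     dispatching_dev_req = false;
 */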

Patch

diff --git a/exec.c b/exec.c
index fa34ef9..1eb920d 100644
--- a/exec.c
+++ b/exec.c
@@ -3442,6 +3442,48 @@  static bool address_space_section_lookup_ref(AddressSpace *as,
     return safe_ref;
 }
 
+typedef struct ThreadContext {
+  DispatchType dispatch_type;
+  unsigned int mmio_req_pending;
+} ThreadContext;
+
+static __thread ThreadContext thread_context = {
+    .dispatch_type = DISPATCH_INIT,
+    .mmio_req_pending = 0
+};
+
+void qemu_thread_set_dispatch_type(DispatchType type)
+{
+    thread_context.dispatch_type = type;
+}
+
+void qemu_thread_reset_dispatch_type(void)
+{
+    thread_context.dispatch_type = DISPATCH_INIT;
+}
+
+static void address_space_check_inc_req_pending(MemoryRegionSection *section)
+{
+    bool nested = false;
+
+    /* currently, only mmio out of big lock, and need this to avoid dead lock */
+    if (thread_context.dispatch_type == DISPATCH_MMIO) {
+        nested = ++thread_context.mmio_req_pending > 1 ? true : false;
+        /* To fix, will filter iommu case */
+        if (nested && !memory_region_is_ram(section->mr)) {
+            fprintf(stderr, "mmio: nested target not RAM is not support");
+            abort();
+        }
+    }
+}
+
+static void address_space_dec_req_pending(void)
+{
+    if (thread_context.dispatch_type == DISPATCH_MMIO) {
+        thread_context.mmio_req_pending--;
+    }
+}
+
 void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
                       int len, bool is_write)
 {
@@ -3462,6 +3504,8 @@  void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
         qemu_mutex_lock(&as->lock);
         safe_ref = memory_region_section_lookup_ref(d, page, &obj_mrs);
         qemu_mutex_unlock(&as->lock);
+        address_space_check_inc_req_pending(&obj_mrs);
+
         if (!safe_ref) {
             qemu_mutex_lock_iothread();
             qemu_mutex_lock(&as->lock);
@@ -3477,6 +3521,7 @@  void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
         if (is_write) {
             if (!memory_region_is_ram(section->mr)) {
                 target_phys_addr_t addr1;
+
                 addr1 = memory_region_section_addr(section, addr);
                 /* XXX: could force cpu_single_env to NULL to avoid
                    potential bugs */
@@ -3510,6 +3555,7 @@  void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
             if (!(memory_region_is_ram(section->mr) ||
                   memory_region_is_romd(section->mr))) {
                 target_phys_addr_t addr1;
+
                 /* I/O case */
                 addr1 = memory_region_section_addr(section, addr);
                 if (l >= 4 && ((addr1 & 3) == 0)) {
@@ -3537,6 +3583,7 @@  void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
                 qemu_put_ram_ptr(ptr);
             }
         }
+        address_space_dec_req_pending();
         memory_region_section_unref(&obj_mrs);
         len -= l;
         buf += l;
diff --git a/qemu-thread.h b/qemu-thread.h
index 05fdaaf..fc9e17b 100644
--- a/qemu-thread.h
+++ b/qemu-thread.h
@@ -7,6 +7,11 @@ 
 typedef struct QemuMutex QemuMutex;
 typedef struct QemuCond QemuCond;
 typedef struct QemuThread QemuThread;
+typedef enum {
+  DISPATCH_INIT = 0,
+  DISPATCH_MMIO,
+  DISPATCH_IO,
+} DispatchType;
 
 #ifdef _WIN32
 #include "qemu-thread-win32.h"
@@ -46,4 +51,6 @@  void qemu_thread_get_self(QemuThread *thread);
 bool qemu_thread_is_self(QemuThread *thread);
 void qemu_thread_exit(void *retval);
 
+void qemu_thread_set_dispatch_type(DispatchType type);
+void qemu_thread_reset_dispatch_type(void);
 #endif