Patchwork kvm: avoid reentering kvm_flush_coalesced_mmio_buffer()

login
register
mail settings
Submitter Avi Kivity
Date Oct. 18, 2011, 5:45 p.m.
Message ID <1318959904-9617-1-git-send-email-avi@redhat.com>
Download mbox | patch
Permalink /patch/120474/
State New
Headers show

Comments

Avi Kivity - Oct. 18, 2011, 5:45 p.m.
mmio callbacks invoked by kvm_flush_coalesced_mmio_buffer() may
themselves indirectly call kvm_flush_coalesced_mmio_buffer().
Prevent reentering the function by checking a flag that indicates
we're processing coalesced mmio requests.

Signed-off-by: Avi Kivity <avi@redhat.com>
---
 kvm-all.c |   10 ++++++++++
 1 files changed, 10 insertions(+), 0 deletions(-)

Patch

diff --git a/kvm-all.c b/kvm-all.c
index e783b23..4c8aebd 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -64,6 +64,7 @@  struct KVMState
     int vmfd;
     int coalesced_mmio;
     struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+    bool coalesced_flush_in_progress;
     int broken_set_mem_region;
     int migration_log;
     int vcpu_events;
@@ -897,6 +898,13 @@  static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
 void kvm_flush_coalesced_mmio_buffer(void)
 {
     KVMState *s = kvm_state;
+
+    if (s->coalesced_flush_in_progress) {
+        return;
+    }
+
+    s->coalesced_flush_in_progress = true;
+
     if (s->coalesced_mmio_ring) {
         struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
         while (ring->first != ring->last) {
@@ -909,6 +917,8 @@  void kvm_flush_coalesced_mmio_buffer(void)
             ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
         }
     }
+
+    s->coalesced_flush_in_progress = false;
 }
 
 static void do_kvm_cpu_synchronize_state(void *_env)