
[25/29] memory: use atomic ops for setting dirty memory bits

Message ID 1430152117-100558-26-git-send-email-pbonzini@redhat.com
State New

Commit Message

Paolo Bonzini April 27, 2015, 4:28 p.m. UTC
From: Stefan Hajnoczi <stefanha@redhat.com>

Use set_bit_atomic() and bitmap_set_atomic() so that multiple threads
can dirty memory without race conditions.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <1417519399-3166-4-git-send-email-stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 include/exec/ram_addr.h | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
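
The change is mechanical: every plain bit update on ram_list.dirty_memory
becomes its atomic counterpart. As a hedged sketch (not necessarily QEMU's
exact definition, which lives in include/qemu/bitops.h), set_bit_atomic()
can be built on the same atomic_or() primitive that the last hunk of this
patch uses directly:

    /* Sketch, assuming the usual Linux-style bit helpers; the real
     * QEMU macros may differ in detail. */
    #define BITS_PER_LONG   (8 * sizeof(unsigned long))
    #define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))
    #define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

    static inline void set_bit_atomic(long nr, unsigned long *addr)
    {
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = addr + BIT_WORD(nr);

        /* atomic read-modify-write, so concurrent setters cannot
         * lose each other's bits */
        atomic_or(p, mask);
    }

bitmap_set_atomic() then applies the same idea across a range of words.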

Comments

Fam Zheng May 26, 2015, 12:29 p.m. UTC | #1
On Mon, 04/27 18:28, Paolo Bonzini wrote:
> From: Stefan Hajnoczi <stefanha@redhat.com>
> 
> Use set_bit_atomic() and bitmap_set_atomic() so that multiple threads
> can dirty memory without race conditions.
> 
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> Message-Id: <1417519399-3166-4-git-send-email-stefanha@redhat.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Reviewed-by: Fam Zheng <famz@redhat.com>

Patch

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 40b4b47..bb69a28 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -114,7 +114,7 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                       unsigned client)
 {
     assert(client < DIRTY_MEMORY_NUM);
-    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
+    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
 }
 
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -122,17 +122,18 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        uint8_t mask)
 {
     unsigned long end, page;
+    unsigned long **d = ram_list.dirty_memory;
 
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
     if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
     }
     if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
     }
     if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
     }
     xen_modified_memory(start, length);
 }
@@ -159,11 +160,12 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         for (k = 0; k < nr; k++) {
             if (bitmap[k]) {
                 unsigned long temp = leul_to_cpu(bitmap[k]);
+                unsigned long **d = ram_list.dirty_memory;
 
-                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
-                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
+                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
+                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                 if (tcg_enabled()) {
-                    ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
+                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                 }
             }
         }
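
Why the plain |= replaced in the last hunk was unsafe: it compiles to a
load, an OR, and a store, so two threads dirtying pages whose bits share a
bitmap word can each read the old value and one thread's bits are lost. A
standalone C11 illustration of the difference (hypothetical demo code, using
atomic_fetch_or() from <stdatomic.h> in place of QEMU's atomic_or()):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic unsigned long dirty_word;

    /* Each thread sets one bit, the way two vCPUs might dirty two
     * pages whose dirty bits live in the same bitmap word. */
    static void *set_dirty_bit(void *arg)
    {
        unsigned long mask = (unsigned long)(uintptr_t)arg;

        atomic_fetch_or(&dirty_word, mask);  /* analogous to atomic_or(p, mask) */
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        pthread_create(&t1, NULL, set_dirty_bit, (void *)(uintptr_t)1UL);
        pthread_create(&t2, NULL, set_dirty_bit, (void *)(uintptr_t)2UL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /* Always prints 3; a plain |= could lose one update and
         * print 1 or 2 when the read-modify-writes interleave. */
        printf("dirty_word = %lu\n", atomic_load(&dirty_word));
        return 0;
    }

In the lebitmap case the whole incoming word is merged at once, so there is
no per-bit helper to reuse and the patch calls atomic_or() on the bitmap
word directly.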