
[RFC,v4,2/5] ramlist mutex

Message ID 8f99d56f3a48b6255cf70425bc435d8f231f5352.1313552764.git.udeshpan@redhat.com
State New

Commit Message

Umesh Deshpande Aug. 17, 2011, 3:56 a.m. UTC
A ramlist mutex is introduced to protect RAMBlock list traversal in the
migration thread from concurrent addition/removal of blocks in the iothread.

Signed-off-by: Umesh Deshpande <udeshpan@redhat.com>
---
 cpu-all.h     |    2 ++
 exec.c        |   19 +++++++++++++++++++
 qemu-common.h |    2 ++
 3 files changed, 23 insertions(+), 0 deletions(-)

Comments

Paolo Bonzini Aug. 17, 2011, 6:28 a.m. UTC | #1
On 08/16/2011 08:56 PM, Umesh Deshpande wrote:
> @@ -3001,8 +3016,10 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
>
>       QLIST_FOREACH(block, &ram_list.blocks, next) {
>           if (addr == block->offset) {
> +            qemu_mutex_lock_ramlist();
>               QLIST_REMOVE(block, next);
>               QLIST_REMOVE(block, next_mru);
> +            qemu_mutex_unlock_ramlist();
>               qemu_free(block);
>               return;
>           }
> @@ -3015,8 +3032,10 @@ void qemu_ram_free(ram_addr_t addr)
>
>       QLIST_FOREACH(block, &ram_list.blocks, next) {
>           if (addr == block->offset) {
> +            qemu_mutex_lock_ramlist();
>               QLIST_REMOVE(block, next);
>               QLIST_REMOVE(block, next_mru);
> +            qemu_mutex_unlock_ramlist();
>               if (block->flags & RAM_PREALLOC_MASK) {
>                   ;
>               } else if (mem_path) {

You must protect the whole QLIST_FOREACH.  Otherwise looks good.

Paolo
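
A minimal sketch of the shape Paolo is suggesting -- taking the mutex before the
traversal starts rather than only around the unlink (based on the hunk above, not
the author's final code):

    void qemu_ram_free_from_ptr(ram_addr_t addr)
    {
        RAMBlock *block;

        /* Hold the mutex across the whole traversal so the list cannot
         * change under QLIST_FOREACH, not only around the removal. */
        qemu_mutex_lock_ramlist();
        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr == block->offset) {
                QLIST_REMOVE(block, next);
                QLIST_REMOVE(block, next_mru);
                qemu_mutex_unlock_ramlist();
                qemu_free(block);
                return;
            }
        }
        qemu_mutex_unlock_ramlist();
    }
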
Umesh Deshpande Aug. 19, 2011, 6:20 a.m. UTC | #2
On 08/17/2011 02:28 AM, Paolo Bonzini wrote:
> On 08/16/2011 08:56 PM, Umesh Deshpande wrote:
>> @@ -3001,8 +3016,10 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
>>
>>       QLIST_FOREACH(block, &ram_list.blocks, next) {
>>           if (addr == block->offset) {
>> +            qemu_mutex_lock_ramlist();
>>               QLIST_REMOVE(block, next);
>>               QLIST_REMOVE(block, next_mru);
>> +            qemu_mutex_unlock_ramlist();
>>               qemu_free(block);
>>               return;
>>           }
>> @@ -3015,8 +3032,10 @@ void qemu_ram_free(ram_addr_t addr)
>>
>>       QLIST_FOREACH(block, &ram_list.blocks, next) {
>>           if (addr == block->offset) {
>> +            qemu_mutex_lock_ramlist();
>>               QLIST_REMOVE(block, next);
>>               QLIST_REMOVE(block, next_mru);
>> +            qemu_mutex_unlock_ramlist();
>>               if (block->flags & RAM_PREALLOC_MASK) {
>>                   ;
>>               } else if (mem_path) {
>
> You must protect the whole QLIST_FOREACH.  Otherwise looks good.
Alternatively, would it be okay to convert all the RAMBlock list traversals
in exec.c (under the iothread) to MRU traversals? That probably makes sense,
since the original list was also maintained in MRU order, whereas the order
of blocks doesn't matter for the migration code. This way we wouldn't have
to acquire the mutex for block list traversals.

- Umesh
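
For illustration, the kind of traversal being described -- walking the MRU list
through its next_mru links so the plain list (and its mutex) is not touched --
could look like this hypothetical helper (name and shape invented here, not part
of the series):

    /* Hypothetical helper, for illustration only: find the block that
     * contains 'addr' by walking the MRU list from the iothread. */
    static RAMBlock *find_ram_block_mru(ram_addr_t addr)
    {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks_mru, next_mru) {
            if (addr - block->offset < block->length) {
                return block;
            }
        }
        return NULL;
    }
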
Paolo Bonzini Aug. 22, 2011, 6:48 a.m. UTC | #3
On 08/19/2011 08:20 AM, Umesh Deshpande wrote:
> Alternatively, would it be okay to convert all the RAMBlock list traversals
> in exec.c (under the iothread) to MRU traversals? That probably makes sense,
> since the original list was also maintained in MRU order, whereas the order
> of blocks doesn't matter for the migration code. This way we wouldn't have
> to acquire the mutex for block list traversals.

I'm not sure... as I said, the MRU list is on a fast path and 
restricting it to that fast path keeps us honest.  Also, the non-MRU 
list is almost never accessed outside the migration thread, so the mutex 
shouldn't be heavily contended anyway.  You can also think about (not 
too clever) ways to keep the mutex unlocked while not doing ram_save_live.

BTW, actually the migration code tries to migrate the largest blocks 
first (because usually all the blocks after the first are small and 
easily migrated during the _complete pass), so the order does somewhat 
matter for migration.

Paolo
Marcelo Tosatti Aug. 23, 2011, 9:15 a.m. UTC | #4
On Tue, Aug 16, 2011 at 11:56:37PM -0400, Umesh Deshpande wrote:
> A ramlist mutex is introduced to protect RAMBlock list traversal in the
> migration thread from concurrent addition/removal of blocks in the iothread.
> 
> Signed-off-by: Umesh Deshpande <udeshpan@redhat.com>
> ---
>  cpu-all.h     |    2 ++
>  exec.c        |   19 +++++++++++++++++++
>  qemu-common.h |    2 ++
>  3 files changed, 23 insertions(+), 0 deletions(-)
> 
> diff --git a/cpu-all.h b/cpu-all.h
> index 6b217a2..eab9803 100644
> --- a/cpu-all.h
> +++ b/cpu-all.h
> @@ -21,6 +21,7 @@
>  
>  #include "qemu-common.h"
>  #include "cpu-common.h"
> +#include "qemu-thread.h"
>  
>  /* some important defines:
>   *
> @@ -932,6 +933,7 @@ typedef struct RAMBlock {
>  } RAMBlock;
>  
>  typedef struct RAMList {
> +    QemuMutex mutex;
>      uint8_t *phys_dirty;
>      QLIST_HEAD(ram, RAMBlock) blocks;
>      QLIST_HEAD(, RAMBlock) blocks_mru;

A comment on what the mutex protects would be good.
Marcelo Tosatti Aug. 23, 2011, 9:17 a.m. UTC | #5
On Tue, Aug 23, 2011 at 06:15:33AM -0300, Marcelo Tosatti wrote:
> On Tue, Aug 16, 2011 at 11:56:37PM -0400, Umesh Deshpande wrote:
> > A ramlist mutex is introduced to protect RAMBlock list traversal in the
> > migration thread from concurrent addition/removal of blocks in the iothread.
> > 
> > Signed-off-by: Umesh Deshpande <udeshpan@redhat.com>
> > ---
> >  cpu-all.h     |    2 ++
> >  exec.c        |   19 +++++++++++++++++++
> >  qemu-common.h |    2 ++
> >  3 files changed, 23 insertions(+), 0 deletions(-)
> > 
> > diff --git a/cpu-all.h b/cpu-all.h
> > index 6b217a2..eab9803 100644
> > --- a/cpu-all.h
> > +++ b/cpu-all.h
> > @@ -21,6 +21,7 @@
> >  
> >  #include "qemu-common.h"
> >  #include "cpu-common.h"
> > +#include "qemu-thread.h"
> >  
> >  /* some important defines:
> >   *
> > @@ -932,6 +933,7 @@ typedef struct RAMBlock {
> >  } RAMBlock;
> >  
> >  typedef struct RAMList {
> > +    QemuMutex mutex;
> >      uint8_t *phys_dirty;
> >      QLIST_HEAD(ram, RAMBlock) blocks;
> >      QLIST_HEAD(, RAMBlock) blocks_mru;
> 
> A comment on what the mutex protects would be good.

And on the lock ordering.
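
Something along the lines Marcelo is asking for, as a sketch only (the wording is
a suggestion, not from the posted patch; the ordering reflects what is agreed on
later in the thread):

    typedef struct RAMList {
        /* Protects addition and removal of RAMBlocks on the 'blocks' and
         * 'blocks_mru' lists, so the migration thread can traverse them
         * safely.  Lock ordering: when both are needed, take the iothread
         * mutex before the ramlist mutex, never the other way around. */
        QemuMutex mutex;
        uint8_t *phys_dirty;
        QLIST_HEAD(ram, RAMBlock) blocks;
        QLIST_HEAD(, RAMBlock) blocks_mru;
    } RAMList;
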
Paolo Bonzini Aug. 23, 2011, 11:41 a.m. UTC | #6
On 08/23/2011 11:17 AM, Marcelo Tosatti wrote:
>>> >  >    typedef struct RAMList {
>>> >  >  +    QemuMutex mutex;
>>> >  >        uint8_t *phys_dirty;
>>> >  >        QLIST_HEAD(ram, RAMBlock) blocks;
>>> >  >        QLIST_HEAD(, RAMBlock) blocks_mru;
>> >
>> >  A comment on what the mutex protects would be good.

Indeed, especially because Umesh wanted to use the ramlist+iothread 
combo as a rw-lock: iothread = read-lock for the I/O thread, ramlist = 
read-lock for the migration thread, together = exclusive (write) lock. 
But I think I talked him out of this. :)  It's not a bad idea in 
general, it just sounds like overkill in this case.

> And on the lock ordering.

I think when only two locks are involved, we can always assume iothread 
is outer and the other is inner.  Do you agree?

Paolo
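
Expressed as code, the convention is simply to acquire the iothread mutex first
whenever both locks are needed (a sketch with a hypothetical caller, not code
from the series):

    static void resize_ram_list_example(void)
    {
        /* Outer lock first ... */
        qemu_mutex_lock_iothread();
        /* ... then the inner one.  Taking them in the opposite order in
         * another thread would allow the two threads to deadlock. */
        qemu_mutex_lock_ramlist();

        /* ... add or remove RAMBlocks here ... */

        qemu_mutex_unlock_ramlist();
        qemu_mutex_unlock_iothread();
    }
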
Marcelo Tosatti Aug. 23, 2011, 12:16 p.m. UTC | #7
On Tue, Aug 23, 2011 at 01:41:48PM +0200, Paolo Bonzini wrote:
> On 08/23/2011 11:17 AM, Marcelo Tosatti wrote:
> >>>>  >    typedef struct RAMList {
> >>>>  >  +    QemuMutex mutex;
> >>>>  >        uint8_t *phys_dirty;
> >>>>  >        QLIST_HEAD(ram, RAMBlock) blocks;
> >>>>  >        QLIST_HEAD(, RAMBlock) blocks_mru;
> >>>
> >>>  A comment on what the mutex protects would be good.
> 
> Indeed, especially because Umesh wanted to use the ramlist+iothread
> combo as a rw-lock: iothread = read-lock for the I/O thread, ramlist
> = read-lock for the migration thread, together = exclusive (write)
> lock. But I think I talked him out of this. :)  It's not a bad idea
> in general, it just sounds like overkill in this case.
> 
> >And on the lock ordering.
> 
> I think when only two locks are involved, we can always assume
> iothread is outer and the other is inner.  Do you agree?
> 
> Paolo

Yep.

Patch

diff --git a/cpu-all.h b/cpu-all.h
index 6b217a2..eab9803 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -21,6 +21,7 @@ 
 
 #include "qemu-common.h"
 #include "cpu-common.h"
+#include "qemu-thread.h"
 
 /* some important defines:
  *
@@ -932,6 +933,7 @@  typedef struct RAMBlock {
 } RAMBlock;
 
 typedef struct RAMList {
+    QemuMutex mutex;
     uint8_t *phys_dirty;
     QLIST_HEAD(ram, RAMBlock) blocks;
     QLIST_HEAD(, RAMBlock) blocks_mru;
diff --git a/exec.c b/exec.c
index c5c247c..404d8ea 100644
--- a/exec.c
+++ b/exec.c
@@ -582,6 +582,7 @@  void cpu_exec_init_all(unsigned long tb_size)
     code_gen_alloc(tb_size);
     code_gen_ptr = code_gen_buffer;
     page_init();
+    qemu_mutex_init(&ram_list.mutex);
 #if !defined(CONFIG_USER_ONLY)
     io_mem_init();
 #endif
@@ -2802,6 +2803,16 @@  static long gethugepagesize(const char *path)
     return fs.f_bsize;
 }
 
+void qemu_mutex_lock_ramlist(void)
+{
+    qemu_mutex_lock(&ram_list.mutex);
+}
+
+void qemu_mutex_unlock_ramlist(void)
+{
+    qemu_mutex_unlock(&ram_list.mutex);
+}
+
 static void *file_ram_alloc(RAMBlock *block,
                             ram_addr_t memory,
                             const char *path)
@@ -2976,6 +2987,8 @@  ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
     }
     new_block->length = size;
 
+    qemu_mutex_lock_ramlist();
+
     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
     QLIST_INSERT_HEAD(&ram_list.blocks_mru, new_block, next_mru);
 
@@ -2984,6 +2997,8 @@  ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
            0xff, size >> TARGET_PAGE_BITS);
 
+    qemu_mutex_unlock_ramlist();
+
     if (kvm_enabled())
         kvm_setup_guest_memory(new_block->host, size);
 
@@ -3001,8 +3016,10 @@  void qemu_ram_free_from_ptr(ram_addr_t addr)
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
+            qemu_mutex_lock_ramlist();
             QLIST_REMOVE(block, next);
             QLIST_REMOVE(block, next_mru);
+            qemu_mutex_unlock_ramlist();
             qemu_free(block);
             return;
         }
@@ -3015,8 +3032,10 @@  void qemu_ram_free(ram_addr_t addr)
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
+            qemu_mutex_lock_ramlist();
             QLIST_REMOVE(block, next);
             QLIST_REMOVE(block, next_mru);
+            qemu_mutex_unlock_ramlist();
             if (block->flags & RAM_PREALLOC_MASK) {
                 ;
             } else if (mem_path) {
diff --git a/qemu-common.h b/qemu-common.h
index abd7a75..b802883 100644
--- a/qemu-common.h
+++ b/qemu-common.h
@@ -212,6 +212,8 @@  char *qemu_strndup(const char *str, size_t size);
 
 void qemu_mutex_lock_iothread(void);
 void qemu_mutex_unlock_iothread(void);
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
 
 int qemu_open(const char *name, int flags, ...);
 ssize_t qemu_write_full(int fd, const void *buf, size_t count)
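
For context, the migration-thread side that this patch prepares for (added later
in the series) would hold the mutex across its whole RAMBlock walk, roughly like
this hypothetical example:

    /* Hypothetical migration-thread helper: sum the size of all blocks
     * while holding the ramlist mutex, so the iothread cannot add or
     * remove blocks mid-traversal. */
    static uint64_t ram_bytes_total_locked(void)
    {
        RAMBlock *block;
        uint64_t total = 0;

        qemu_mutex_lock_ramlist();
        QLIST_FOREACH(block, &ram_list.blocks, next) {
            total += block->length;
        }
        qemu_mutex_unlock_ramlist();
        return total;
    }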