Patchwork [RFC,v4,15/15] slirp: use lock to protect the slirp_instances

login
register
mail settings
Submitter pingfan liu
Date April 17, 2013, 8:39 a.m.
Message ID <1366187964-14265-16-git-send-email-qemulist@gmail.com>
Download mbox | patch
Permalink /patch/237179/
State New
Headers show

Comments

pingfan liu - April 17, 2013, 8:39 a.m.
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>

slirp instances will run on a dedicated thread, so we need to protect
the global list.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 include/qemu/module.h |    2 ++
 slirp/slirp.c         |   20 ++++++++++++++++++++
 2 files changed, 22 insertions(+), 0 deletions(-)
Jan Kiszka - April 18, 2013, 7:20 a.m.
On 2013-04-17 10:39, Liu Ping Fan wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> 
> slirps will run on dedicated thread, so need to protect the global
> list.
> 
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
>  include/qemu/module.h |    2 ++
>  slirp/slirp.c         |   20 ++++++++++++++++++++
>  2 files changed, 22 insertions(+), 0 deletions(-)
> 
> diff --git a/include/qemu/module.h b/include/qemu/module.h
> index c4ccd57..2720943 100644
> --- a/include/qemu/module.h
> +++ b/include/qemu/module.h
> @@ -22,6 +22,7 @@ static void __attribute__((constructor)) do_qemu_init_ ## function(void) {  \
>  
>  typedef enum {
>      MODULE_INIT_BLOCK,
> +    MODULE_INIT_SLIRP,
>      MODULE_INIT_MACHINE,
>      MODULE_INIT_QAPI,
>      MODULE_INIT_QOM,
> @@ -29,6 +30,7 @@ typedef enum {
>  } module_init_type;
>  
>  #define block_init(function) module_init(function, MODULE_INIT_BLOCK)
> +#define slirplayer_init(function) module_init(function, MODULE_INIT_SLIRP)
>  #define machine_init(function) module_init(function, MODULE_INIT_MACHINE)
>  #define qapi_init(function) module_init(function, MODULE_INIT_QAPI)
>  #define type_init(function) module_init(function, MODULE_INIT_QOM)
> diff --git a/slirp/slirp.c b/slirp/slirp.c
> index 6bfcc67..4cbf04d 100644
> --- a/slirp/slirp.c
> +++ b/slirp/slirp.c
> @@ -42,6 +42,7 @@ static const uint8_t zero_ethaddr[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
>  
>  u_int curtime;
>  
> +static QemuMutex slirp_instances_lock;
>  static QTAILQ_HEAD(slirp_instances, Slirp) slirp_instances =
>      QTAILQ_HEAD_INITIALIZER(slirp_instances);
>  
> @@ -236,14 +237,18 @@ Slirp *slirp_init(int restricted, struct in_addr vnetwork,
>      register_savevm(NULL, "slirp", 0, 3,
>                      slirp_state_save, slirp_state_load, slirp);
>  
> +    qemu_mutex_lock(&slirp_instances_lock);
>      QTAILQ_INSERT_TAIL(&slirp_instances, slirp, entry);
> +    qemu_mutex_unlock(&slirp_instances_lock);
>  
>      return slirp;
>  }
>  
>  void slirp_cleanup(Slirp *slirp)
>  {
> +    qemu_mutex_lock(&slirp_instances_lock);
>      QTAILQ_REMOVE(&slirp_instances, slirp, entry);
> +    qemu_mutex_unlock(&slirp_instances_lock);
>  
>      unregister_savevm(NULL, "slirp", slirp);
>  
> @@ -262,9 +267,12 @@ void slirp_cleanup(Slirp *slirp)
>  
>  void slirp_update_timeout(uint32_t *timeout)
>  {
> +    qemu_mutex_lock(&slirp_instances_lock);
>      if (!QTAILQ_EMPTY(&slirp_instances)) {
>          *timeout = MIN(1000, *timeout);
>      }
> +    qemu_mutex_unlock(&slirp_instances_lock);
> +
>      curtime = qemu_get_clock_ms(rt_clock);
>  }
>  
> @@ -1140,3 +1148,15 @@ static int slirp_state_load(QEMUFile *f, void *opaque, int version_id)
>  
>      return 0;
>  }
> +
> +static void slirplayer_cleanup(void)
> +{
> +    qemu_mutex_destroy(&slirp_instances_lock);
> +}
> +
> +static void slirplayer_bootup(void)
> +{
> +    qemu_mutex_init(&slirp_instances_lock);
> +    atexit(&slirplayer_cleanup);
> +}
> +slirplayer_init(slirplayer_bootup)
> 

grep'ing for slirp_instances points to more spots that work with that
list (QTAILQ_FOREACH, QTAILQ_EMPTY, ...). So the same question here:
What are the usage rules? When do I _not_ need it when touching the list
of instances, and why?

Well, I started reading at the top, but there are more lock-adding
patches in this series. And the more locks we have, the higher the
probability of ABBA gets. Therefore, please document from the beginning
the lock order rules that shall prevent it (which may also be "never
take other locks while holding this one" or "never hold other locks when
taking this one").

Jan
Paolo Bonzini - April 18, 2013, 2:16 p.m.
> grep'ing for slirp_instances points to more spots that work with that
> list (QTAILQ_FOREACH, QTAILQ_EMPTY, ...). So the same question here:
> What are the usage rules? When do I _not_ need it when touching the list
> of instances, and why?
> 
> Well, I started reading at the top, but there are more lock-adding
> patches in this series. And the more locks we have, the higher the
> probability of ABBA gets. Therefore, please document from the beginning
> the lock order rules that shall prevent it (which may also be "never
> take other locks while holding this one" or "never hold other locks when
> taking this one").

Yeah, the only sane ordering rules should be "hold nothing or just
the BQL when taking this one".  Everything else needs a very good
justification...

Paolo
pingfan liu - April 19, 2013, 6:13 a.m.
On Thu, Apr 18, 2013 at 10:16 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
>
>> grep'ing for slirp_instances points to more spots that work with that
>> list (QTAILQ_FOREACH, QTAILQ_EMPTY, ...). So the same question here:
>> What are the usage rules? When do I _not_ need it when touching the list
>> of instances, and why?
>>
>> Well, I started reading at the top, but there are more lock-adding
>> patches in this series. And the more locks we have, the higher the
>> probability of ABBA gets. Therefore, please document from the beginning
>> the lock order rules that shall prevent it (which may also be "never
>> take other locks while holding this one" or "never hold other locks when
>> taking this one").
>
slirp->lock is used to prevent the frontend and backend from touching
slirp's content at the same time, while slirp_instances_lock is used
to protect the list ops only.  For example, when the SlirpState's
refcnt comes to zero (this cannot happen while holding slirp->lock),
finalize is called, and finally, slirp_cleanup is called to remove
the slirp from the list.  The two locks have no overlap.

Regards, Pingfan
> Yeah, the only sane ordering rules should be "hold nothing or just
> the BQL when taking this one".  Everything else needs a very good
> justification...
>
Ok, will notice.
Thanks,  Pingfan
> Paolo

Patch

diff --git a/include/qemu/module.h b/include/qemu/module.h
index c4ccd57..2720943 100644
--- a/include/qemu/module.h
+++ b/include/qemu/module.h
@@ -22,6 +22,7 @@  static void __attribute__((constructor)) do_qemu_init_ ## function(void) {  \
 
 typedef enum {
     MODULE_INIT_BLOCK,
+    MODULE_INIT_SLIRP,
     MODULE_INIT_MACHINE,
     MODULE_INIT_QAPI,
     MODULE_INIT_QOM,
@@ -29,6 +30,7 @@  typedef enum {
 } module_init_type;
 
 #define block_init(function) module_init(function, MODULE_INIT_BLOCK)
+#define slirplayer_init(function) module_init(function, MODULE_INIT_SLIRP)
 #define machine_init(function) module_init(function, MODULE_INIT_MACHINE)
 #define qapi_init(function) module_init(function, MODULE_INIT_QAPI)
 #define type_init(function) module_init(function, MODULE_INIT_QOM)
diff --git a/slirp/slirp.c b/slirp/slirp.c
index 6bfcc67..4cbf04d 100644
--- a/slirp/slirp.c
+++ b/slirp/slirp.c
@@ -42,6 +42,7 @@  static const uint8_t zero_ethaddr[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
 
 u_int curtime;
 
+static QemuMutex slirp_instances_lock;
 static QTAILQ_HEAD(slirp_instances, Slirp) slirp_instances =
     QTAILQ_HEAD_INITIALIZER(slirp_instances);
 
@@ -236,14 +237,18 @@  Slirp *slirp_init(int restricted, struct in_addr vnetwork,
     register_savevm(NULL, "slirp", 0, 3,
                     slirp_state_save, slirp_state_load, slirp);
 
+    qemu_mutex_lock(&slirp_instances_lock);
     QTAILQ_INSERT_TAIL(&slirp_instances, slirp, entry);
+    qemu_mutex_unlock(&slirp_instances_lock);
 
     return slirp;
 }
 
 void slirp_cleanup(Slirp *slirp)
 {
+    qemu_mutex_lock(&slirp_instances_lock);
     QTAILQ_REMOVE(&slirp_instances, slirp, entry);
+    qemu_mutex_unlock(&slirp_instances_lock);
 
     unregister_savevm(NULL, "slirp", slirp);
 
@@ -262,9 +267,12 @@  void slirp_cleanup(Slirp *slirp)
 
 void slirp_update_timeout(uint32_t *timeout)
 {
+    qemu_mutex_lock(&slirp_instances_lock);
     if (!QTAILQ_EMPTY(&slirp_instances)) {
         *timeout = MIN(1000, *timeout);
     }
+    qemu_mutex_unlock(&slirp_instances_lock);
+
     curtime = qemu_get_clock_ms(rt_clock);
 }
 
@@ -1140,3 +1148,15 @@  static int slirp_state_load(QEMUFile *f, void *opaque, int version_id)
 
     return 0;
 }
+
+static void slirplayer_cleanup(void)
+{
+    qemu_mutex_destroy(&slirp_instances_lock);
+}
+
+static void slirplayer_bootup(void)
+{
+    qemu_mutex_init(&slirp_instances_lock);
+    atexit(&slirplayer_cleanup);
+}
+slirplayer_init(slirplayer_bootup)