
[RFC,v2,1/5] timer: protect timers_state with lock

Message ID 1375067768-11342-2-git-send-email-pingfank@linux.vnet.ibm.com
State New

Commit Message

pingfan liu July 29, 2013, 3:16 a.m. UTC
In KVM mode, vm_clock may be read from AioContexts outside the BQL
(see the next patch). This exposes timers_state, the foundation of
vm_clock, to race conditions, so protect it with a private lock.
Note that in TCG mode vm_clock is still read inside the BQL, so
icount is left unchanged.

Lock rule: the private lock is innermost, i.e. BQL -> "this lock".

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 cpus.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)
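
For illustration only, a minimal standalone sketch of the ordering rule
stated in the commit message (BQL first, private lock second); bql,
state_lock, ticks_offset and the two functions below are stand-ins
invented for this sketch, not QEMU code:

/* Standalone illustration of the "BQL -> private lock" ordering rule.
 * bql stands in for the Big QEMU Lock, state_lock for the private
 * timers_state lock added by this patch. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t bql = PTHREAD_MUTEX_INITIALIZER;        /* outermost */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER; /* innermost */
static int64_t ticks_offset;

/* A reader running outside the BQL takes only the inner lock. */
static int64_t read_offset(void)
{
    pthread_mutex_lock(&state_lock);
    int64_t v = ticks_offset;
    pthread_mutex_unlock(&state_lock);
    return v;
}

/* A path that needs both locks must take the BQL first and the
 * private lock second, never the reverse, or two such paths could
 * deadlock against each other. */
static void update_offset(int64_t delta)
{
    pthread_mutex_lock(&bql);
    pthread_mutex_lock(&state_lock);
    ticks_offset += delta;
    pthread_mutex_unlock(&state_lock);
    pthread_mutex_unlock(&bql);
}

int main(void)
{
    update_offset(42);
    printf("%lld\n", (long long)read_offset());  /* prints 42 */
    return 0;
}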

Comments

Paolo Bonzini July 29, 2013, 6:26 a.m. UTC | #1
On 29/07/2013 05:16, Liu Ping Fan wrote:
> In KVM mode, vm_clock may be read from AioContexts outside the BQL
> (see the next patch). This exposes timers_state, the foundation of
> vm_clock, to race conditions, so protect it with a private lock.
> Note that in TCG mode vm_clock is still read inside the BQL, so
> icount is left unchanged.
> 
> Lock rule: the private lock is innermost, i.e. BQL -> "this lock".

This is quite expensive; on the other hand it is probably a good thing
to do even without the other patches.

Can you use the seqlock primitive we initially worked on for the memory
dispatch (with the BQL on the write side)?

Paolo
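
To make the suggestion concrete, here is a standalone sketch of the
sequence-counter pattern (C11 atomics plus a pthread mutex standing in
for the BQL). It is not QEMU's seqlock.h API, only the general scheme
it implements: writers serialize on the lock and make the counter odd
while they update; lock-free readers retry if the counter was odd or
changed underneath them.

/* Illustrative seqlock, not QEMU's implementation. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static pthread_mutex_t bql = PTHREAD_MUTEX_INITIALIZER;  /* write side */
static atomic_uint seq;                                   /* even = stable */
static int64_t clock_offset;                              /* protected data */

static void clock_write(int64_t delta)
{
    pthread_mutex_lock(&bql);
    atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed);  /* now odd */
    atomic_thread_fence(memory_order_release);
    clock_offset += delta;
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed);  /* even again */
    pthread_mutex_unlock(&bql);
}

static int64_t clock_read(void)
{
    unsigned start;
    int64_t val;

    do {
        /* Wait for a stable (even) sequence number. */
        do {
            start = atomic_load_explicit(&seq, memory_order_relaxed);
        } while (start & 1);
        atomic_thread_fence(memory_order_acquire);
        val = clock_offset;
        atomic_thread_fence(memory_order_acquire);
        /* Retry if a writer ran while we were reading. */
    } while (atomic_load_explicit(&seq, memory_order_relaxed) != start);

    return val;
}

int main(void)
{
    clock_write(100);
    return clock_read() == 100 ? 0 : 1;
}

With this pattern, readers such as cpu_get_ticks()/cpu_get_clock()
would pay only the retry loop instead of a mutex round trip, while the
writers would stay serialized by the BQL, as suggested above.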

pingfan liu July 29, 2013, 8:01 a.m. UTC | #2
On Mon, Jul 29, 2013 at 2:26 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> On 29/07/2013 05:16, Liu Ping Fan wrote:
>> In KVM mode, vm_clock may be read from AioContexts outside the BQL
>> (see the next patch). This exposes timers_state, the foundation of
>> vm_clock, to race conditions, so protect it with a private lock.
>> Note that in TCG mode vm_clock is still read inside the BQL, so
>> icount is left unchanged.
>>
>> Lock rule: the private lock is innermost, i.e. BQL -> "this lock".
>
> This is quite expensive; on the other hand it is probably a good thing
> to do even without the other patches.
>
> Can you use the seqlock primitive we initially worked on for the memory
> dispatch (with the BQL on the write side)?
>
Fine, of course, a seqlock is the better choice here.

Thanks,
Pingfan
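
As a sketch of where that agreement might lead, the read path of
cpu_get_clock() could look roughly like the following once reworked
around QEMU's QemuSeqLock (seqlock_read_begin()/seqlock_read_retry()
from include/qemu/seqlock.h). The vm_clock_seqlock field and the
cpu_get_clock_locked() helper are assumptions for illustration, not
part of this series:

/* Assumed rework, not part of this patch: timers_state grows a
 * QemuSeqLock (vm_clock_seqlock) whose write side runs under the BQL,
 * and cpu_get_clock_locked() is a hypothetical helper containing the
 * current body of cpu_get_clock() without any locking. */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}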

Patch

diff --git a/cpus.c b/cpus.c
index 61e86a8..4af81e9 100644
--- a/cpus.c
+++ b/cpus.c
@@ -112,7 +112,9 @@  typedef struct TimersState {
     int64_t dummy;
 } TimersState;
 
-TimersState timers_state;
+static TimersState timers_state;
+/* lock rule: innermost */
+static QemuMutex timers_state_lock;
 
 /* Return the virtual CPU time, based on the instruction counter.  */
 int64_t cpu_get_icount(void)
@@ -134,11 +136,14 @@  int64_t cpu_get_icount(void)
 /* return the host CPU cycle counter and handle stop/restart */
 int64_t cpu_get_ticks(void)
 {
+    int64_t ret;
+
     if (use_icount) {
         return cpu_get_icount();
     }
+    qemu_mutex_lock(&timers_state_lock);
     if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_ticks_offset;
+        ret = timers_state.cpu_ticks_offset;
     } else {
         int64_t ticks;
         ticks = cpu_get_real_ticks();
@@ -148,41 +153,51 @@  int64_t cpu_get_ticks(void)
             timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
         }
         timers_state.cpu_ticks_prev = ticks;
-        return ticks + timers_state.cpu_ticks_offset;
+        ret = ticks + timers_state.cpu_ticks_offset;
     }
+    qemu_mutex_unlock(&timers_state_lock);
+    return ret;
 }
 
 /* return the host CPU monotonic timer and handle stop/restart */
 int64_t cpu_get_clock(void)
 {
     int64_t ti;
+
+    qemu_mutex_lock(&timers_state_lock);
     if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_clock_offset;
+        ti = timers_state.cpu_clock_offset;
     } else {
         ti = get_clock();
-        return ti + timers_state.cpu_clock_offset;
+        ti += timers_state.cpu_clock_offset;
     }
+    qemu_mutex_unlock(&timers_state_lock);
+    return ti;
 }
 
 /* enable cpu_get_ticks() */
 void cpu_enable_ticks(void)
 {
+    qemu_mutex_lock(&timers_state_lock);
     if (!timers_state.cpu_ticks_enabled) {
         timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
         timers_state.cpu_clock_offset -= get_clock();
         timers_state.cpu_ticks_enabled = 1;
     }
+    qemu_mutex_unlock(&timers_state_lock);
 }
 
 /* disable cpu_get_ticks() : the clock is stopped. You must not call
    cpu_get_ticks() after that.  */
 void cpu_disable_ticks(void)
 {
+    qemu_mutex_lock(&timers_state_lock);
     if (timers_state.cpu_ticks_enabled) {
         timers_state.cpu_ticks_offset = cpu_get_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock();
         timers_state.cpu_ticks_enabled = 0;
     }
+    qemu_mutex_unlock(&timers_state_lock);
 }
 
 /* Correlation between real and virtual time is always going to be
@@ -353,6 +368,7 @@  static const VMStateDescription vmstate_timers = {
 
 void configure_icount(const char *option)
 {
+    qemu_mutex_init(&timers_state_lock);
     vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
     if (!option) {
         return;