[8/8] timers: make icount thread-safe

Message ID: 1381222058-16701-9-git-send-email-pbonzini@redhat.com
State: New

Commit Message

Paolo Bonzini Oct. 8, 2013, 8:47 a.m. UTC
This lets threads other than the I/O thread use vm_clock even in -icount mode.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 cpus.c | 42 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 34 insertions(+), 8 deletions(-)
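
For readers following along: the patch leaves the arithmetic in cpu_get_icount_locked() untouched and instead puts it behind a seqlock, so that the I/O-thread writers (icount_adjust(), icount_warp_rt() and the warp helpers) publish updates to qemu_icount_bias inside write sections, while cpu_get_icount() retries its read whenever a write raced with it. A minimal, self-contained sketch of that reader/writer protocol follows; the demo_* names, the C11 atomics and the sequentially consistent ordering are simplifying assumptions for illustration, not QEMU's actual seqlock implementation.

/*
 * Minimal standalone sketch of the seqlock protocol the patch relies on.
 * All demo_* names are hypothetical.  As in QEMU, writers are assumed to
 * be serialized externally (e.g. by the BQL); the seqlock only protects
 * readers running on other threads.
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    atomic_uint sequence;            /* odd while an update is in flight */
} DemoSeqLock;

static struct {
    DemoSeqLock lock;
    _Atomic int64_t icount_bias;     /* stands in for qemu_icount_bias */
    _Atomic int icount_shift;        /* stands in for icount_time_shift */
} demo_state;

/* Reader side: the loop added to cpu_get_icount() in the patch. */
static unsigned demo_read_begin(DemoSeqLock *sl)
{
    unsigned seq;

    do {                             /* spin while a writer is active */
        seq = atomic_load(&sl->sequence);
    } while (seq & 1);
    return seq;
}

static bool demo_read_retry(DemoSeqLock *sl, unsigned start)
{
    return atomic_load(&sl->sequence) != start;
}

/* Writer side: what seqlock_write_lock()/unlock() do around an update. */
static void demo_write_lock(DemoSeqLock *sl)
{
    atomic_fetch_add(&sl->sequence, 1);   /* sequence becomes odd */
}

static void demo_write_unlock(DemoSeqLock *sl)
{
    atomic_fetch_add(&sl->sequence, 1);   /* sequence becomes even again */
}

/* Consistent snapshot of two related fields, as cpu_get_icount() needs. */
static int64_t demo_get_time(int64_t executed_insns)
{
    int64_t t;
    unsigned start;

    do {
        start = demo_read_begin(&demo_state.lock);
        t = atomic_load(&demo_state.icount_bias) +
            (executed_insns << atomic_load(&demo_state.icount_shift));
    } while (demo_read_retry(&demo_state.lock, start));
    return t;
}

/* Update both fields atomically with respect to readers. */
static void demo_retune(int64_t new_bias, int new_shift)
{
    demo_write_lock(&demo_state.lock);
    atomic_store(&demo_state.icount_bias, new_bias);
    atomic_store(&demo_state.icount_shift, new_shift);
    demo_write_unlock(&demo_state.lock);
}

int main(void)
{
    demo_retune(1000, 3);
    printf("virtual time = %" PRId64 " ns\n", demo_get_time(8));
    return 0;
}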

Comments

Alex Bligh Oct. 8, 2013, 4:57 p.m. UTC | #1
On 8 Oct 2013, at 09:47, Paolo Bonzini wrote:

> This lets threads other than the I/O thread use vm_clock even in -icount mode.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Looks good to me

Alex


Patch

diff --git a/cpus.c b/cpus.c
index bc675a4..1e5cba4 100644
--- a/cpus.c
+++ b/cpus.c
@@ -133,7 +133,7 @@  typedef struct TimersState {
 static TimersState timers_state;
 
 /* Return the virtual CPU time, based on the instruction counter.  */
-int64_t cpu_get_icount(void)
+static int64_t cpu_get_icount_locked(void)
 {
     int64_t icount;
     CPUState *cpu = current_cpu;
@@ -149,6 +149,19 @@  int64_t cpu_get_icount(void)
     return qemu_icount_bias + (icount << icount_time_shift);
 }
 
+int64_t cpu_get_icount(void)
+{
+    int64_t icount;
+    unsigned start;
+
+    do {
+        start = seqlock_read_begin(&timers_state.clock_seqlock);
+        icount = cpu_get_icount_locked();
+    } while (seqlock_read_retry(&timers_state.clock_seqlock, start));
+
+    return icount;
+}
+
 /* return the host CPU cycle counter and handle stop/restart */
 /* cpu_ticks is safely if holding BQL */
 int64_t cpu_get_ticks(void)
@@ -246,8 +259,9 @@  static void icount_adjust(void)
         return;
     }
 
-    cur_time = cpu_get_clock();
-    cur_icount = cpu_get_icount();
+    seqlock_write_lock(&timers_state.clock_seqlock);
+    cur_time = cpu_get_clock_locked();
+    cur_icount = cpu_get_icount_locked();
 
     delta = cur_icount - cur_time;
     /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
@@ -265,6 +279,7 @@  static void icount_adjust(void)
     }
     last_delta = delta;
     qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
+    seqlock_write_unlock(&timers_state.clock_seqlock);
 }
 
 static void icount_adjust_rt(void *opaque)
@@ -289,10 +304,14 @@  static int64_t qemu_icount_round(int64_t count)
 
 static void icount_warp_rt(void *opaque)
 {
-    if (vm_clock_warp_start == -1) {
+    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
+     * changes from -1 to another value, so the race here is okay.
+     */
+    if (atomic_read(&vm_clock_warp_start) == -1) {
         return;
     }
 
+    seqlock_write_lock(&timers_state.clock_seqlock);
     if (runstate_is_running()) {
         int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
         int64_t warp_delta;
@@ -303,14 +322,15 @@  static void icount_warp_rt(void *opaque)
              * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
              * far ahead of real time.
              */
-            int64_t cur_time = cpu_get_clock();
-            int64_t cur_icount = cpu_get_icount();
+            int64_t cur_time = cpu_get_clock_locked();
+            int64_t cur_icount = cpu_get_icount_locked();
             int64_t delta = cur_time - cur_icount;
             warp_delta = MIN(warp_delta, delta);
         }
         qemu_icount_bias += warp_delta;
     }
     vm_clock_warp_start = -1;
+    seqlock_write_unlock(&timers_state.clock_seqlock);
 
     if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -324,7 +344,10 @@  void qtest_clock_warp(int64_t dest)
     while (clock < dest) {
         int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
         int64_t warp = MIN(dest - clock, deadline);
+        seqlock_write_lock(&timers_state.clock_seqlock);
         qemu_icount_bias += warp;
+        seqlock_write_unlock(&timers_state.clock_seqlock);
+
         qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
         clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
     }
@@ -391,9 +415,11 @@  void qemu_clock_warp(QEMUClockType type)
          * you will not be sending network packets continuously instead of
          * every 100ms.
          */
+        seqlock_write_lock(&timers_state.clock_seqlock);
         if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
             vm_clock_warp_start = clock;
         }
+        seqlock_write_unlock(&timers_state.clock_seqlock);
         timer_mod_anticipate(icount_warp_timer, clock + deadline);
     } else if (deadline == 0) {
         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
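
A second point worth unpacking is the comment added to icount_warp_rt(): vm_clock_warp_start is tested with atomic_read() outside the seqlock as a cheap early-out, and that is safe only because qemu_clock_warp() re-arms icount_warp_timer after changing the value, so a callback that raced with the store and bailed out is followed by another one that does see it. A standalone sketch of that shape, with hypothetical names and a plain mutex plus C11 atomics standing in for QEMU's primitives:

/*
 * Sketch of the "check outside the lock" pattern in icount_warp_rt().
 * All names are hypothetical; the warp math is a stand-in.
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t warp_start = -1;   /* -1 means "no warp pending" */
static pthread_mutex_t clock_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t clock_bias;

/* Producer: publish a pending warp, then (re)arm the timer.  Because the
 * timer is armed only after the store, a callback that raced with the
 * store and returned early is always followed by another callback that
 * observes the new value. */
static void request_warp(int64_t start)
{
    atomic_store(&warp_start, start);
    /* ... a timer_mod_anticipate()-style re-arming would go here ... */
}

/* Timer callback: cheap early-out without taking the lock, as in
 * icount_warp_rt(); the actual update runs under the lock. */
static void warp_timer_cb(void)
{
    if (atomic_load(&warp_start) == -1) {
        return;                            /* benign if we raced: see above */
    }

    pthread_mutex_lock(&clock_lock);
    clock_bias += atomic_load(&warp_start);   /* stand-in for the warp math */
    atomic_store(&warp_start, -1);
    pthread_mutex_unlock(&clock_lock);
}

int main(void)
{
    request_warp(5000);
    warp_timer_cb();
    printf("bias = %" PRId64 "\n", clock_bias);
    return 0;
}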