Patchwork [RFC,v2,5/5] timer: run timers on aio_poll

login
register
mail settings
Submitter pingfan liu
Date July 29, 2013, 3:16 a.m.
Message ID <1375067768-11342-6-git-send-email-pingfank@linux.vnet.ibm.com>
Download mbox | patch
Permalink /patch/262656/
State New
Headers show

Comments

pingfan liu - July 29, 2013, 3:16 a.m.
Stop calling timers in the main loop and let each mini event-loop
run its own timers instead.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 aio-posix.c          |  2 ++
 include/qemu/timer.h |  4 ++--
 main-loop.c          |  2 --
 qemu-timer.c         | 15 ++++++++++-----
 4 files changed, 14 insertions(+), 9 deletions(-)

Patch

diff --git a/aio-posix.c b/aio-posix.c
index b68eccd..53fdb1a 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -191,6 +191,8 @@  bool aio_poll(AioContext *ctx, bool blocking)
         progress = true;
     }
 
+    progress |= qemu_run_all_timers();
+
     if (progress && !blocking) {
         return true;
     }
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 3e5016b..9ff8a68 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -61,8 +61,8 @@  bool qemu_timer_pending(QEMUTimer *ts);
 bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
 uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);
 
-void qemu_run_timers(QEMUClock *clock);
-void qemu_run_all_timers(void);
+bool qemu_run_timers(QEMUClock *clock);
+bool qemu_run_all_timers(void);
 void configure_alarms(char const *opt);
 void init_clocks(void);
 int init_timer_alarm(void);
diff --git a/main-loop.c b/main-loop.c
index 5fbdd4a..0214ed6 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -471,8 +471,6 @@  int main_loop_wait(int nonblocking)
     slirp_pollfds_poll(gpollfds, (ret < 0));
 #endif
 
-    qemu_run_all_timers();
-
     return ret;
 }
 
diff --git a/qemu-timer.c b/qemu-timer.c
index f15c3e6..8331e18 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -463,12 +463,13 @@  bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
     return qemu_timer_expired_ns(timer_head, current_time * timer_head->scale);
 }
 
-void qemu_run_timers(QEMUClock *clock)
+bool qemu_run_timers(QEMUClock *clock)
 {
     QEMUTimer *ts;
     int64_t current_time;
     TimerList *tlist;
     AioContext *ctx;
+    bool process = false;
 
     atomic_inc(&clock->using);
     if (unlikely(!clock->enabled)) {
@@ -494,6 +495,7 @@  void qemu_run_timers(QEMUClock *clock)
 
         /* run the callback (the timer list can be modified) */
         ts->cb(ts->opaque);
+        process = true;
     }
 
 exit:
@@ -504,6 +506,7 @@  exit:
         }
     }
     qemu_mutex_unlock(&clock->lock);
+    return process;
 }
 
 int64_t qemu_get_clock_ns(QEMUClock *clock)
@@ -555,16 +558,17 @@  uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts)
     return qemu_timer_pending(ts) ? ts->expire_time : -1;
 }
 
-void qemu_run_all_timers(void)
+bool qemu_run_all_timers(void)
 {
     bool timer_modified;
+    bool process = false;
 
     alarm_timer->pending = false;
 
     /* vm time timers */
-    qemu_run_timers(vm_clock);
-    qemu_run_timers(rt_clock);
-    qemu_run_timers(host_clock);
+    process |= qemu_run_timers(vm_clock);
+    process |= qemu_run_timers(rt_clock);
+    process |= qemu_run_timers(host_clock);
 
     /* Check if qemu_mod_timer_ns() has been called */
     qemu_mutex_lock(&alarm_timer->timer_modified_lock);
@@ -580,6 +584,7 @@  void qemu_run_all_timers(void)
         qemu_rearm_alarm_timer(alarm_timer);
     }
 
+    return process;
 }
 
 #ifdef _WIN32