
[v5,1/3] qemu-thread: Add qemu_cond_timedwait

Message ID 20190826103726.25538-2-yury-kotov@yandex-team.ru
State New
Series High downtime with 95+ throttle pct

Commit Message

Yury Kotov Aug. 26, 2019, 10:37 a.m. UTC
Signed-off-by: Yury Kotov <yury-kotov@yandex-team.ru>
---
 include/qemu/thread.h    | 18 ++++++++++++++++++
 util/qemu-thread-posix.c | 40 ++++++++++++++++++++++++++++------------
 util/qemu-thread-win32.c | 16 ++++++++++++++++
 util/qsp.c               | 18 ++++++++++++++++++
 4 files changed, 80 insertions(+), 12 deletions(-)

Comments

Eric Blake Sept. 5, 2019, 7:45 p.m. UTC | #1
On 8/26/19 5:37 AM, Yury Kotov wrote:
> Signed-off-by: Yury Kotov <yury-kotov@yandex-team.ru>
> ---

Rather sparse on the commit message details.

>  include/qemu/thread.h    | 18 ++++++++++++++++++
>  util/qemu-thread-posix.c | 40 ++++++++++++++++++++++++++++------------
>  util/qemu-thread-win32.c | 16 ++++++++++++++++
>  util/qsp.c               | 18 ++++++++++++++++++
>  4 files changed, 80 insertions(+), 12 deletions(-)
> 

> +++ b/util/qemu-thread-posix.c
> @@ -36,6 +36,18 @@ static void error_exit(int err, const char *msg)
>      abort();
>  }
>  
> +static void compute_abs_deadline(struct timespec *ts, int ms)
> +{
> +    struct timeval tv;
> +    gettimeofday(&tv, NULL);
> +    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
> +    ts->tv_sec = tv.tv_sec + ms / 1000;
> +    if (ts->tv_nsec >= 1000000000) {
> +        ts->tv_sec++;
> +        ts->tv_nsec -= 1000000000;
> +    }

I don't know if any named constants would make this easier or harder to
read (such as USEC_PER_SEC 1000000 or NSEC_PER_SEC 1000000000), but the
conversion from relative ms to absolute timespec looks correct. [1]
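
For comparison, here is how the helper might read with such constants
(purely a sketch; these #defines are illustrative, not macros the tree
already provides):

#define MSEC_PER_SEC  1000
#define NSEC_PER_USEC 1000
#define NSEC_PER_MSEC 1000000
#define NSEC_PER_SEC  1000000000

static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * NSEC_PER_USEC +
                  (ms % MSEC_PER_SEC) * NSEC_PER_MSEC;
    ts->tv_sec = tv.tv_sec + ms / MSEC_PER_SEC;
    if (ts->tv_nsec >= NSEC_PER_SEC) {
        ts->tv_sec++;
        ts->tv_nsec -= NSEC_PER_SEC;
    }
}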

> +void qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
> +                              const char *file, const int line)
> +{
> +    int err;
> +    struct timespec ts;
> +
> +    assert(cond->initialized);
> +    trace_qemu_mutex_unlock(mutex, file, line);
> +    compute_abs_deadline(&ts, ms);
> +    err = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
> +    trace_qemu_mutex_locked(mutex, file, line);
> +    if (err && err != ETIMEDOUT) {
> +        error_exit(err, __func__);
> +    }
> +}

However, this function returning void looks odd.  ETIMEDOUT is the one
error that guarantees the mutex has been reacquired (all other errors
occur before the mutex is given up in the first place), and the man
page warns that you MUST recheck the condition in a while loop
regardless of success or failure: the wake-up may be spurious (a
broadcast where neither the condition nor the timeout has actually
been reached yet), or the function may lose a race and report a
timeout immediately before the condition variable was signaled after
all.  Even so, callers might like to know whether a timeout happened,
without having to calculate an ending absolute time themselves.
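
A bool return would be enough for that.  A rough sketch of the shape I
have in mind, returning false only when the wait timed out (true may
still be a spurious wake-up, so the usual predicate loop is still
required):

bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line)
{
    int err;
    struct timespec ts;

    assert(cond->initialized);
    trace_qemu_mutex_unlock(mutex, file, line);
    compute_abs_deadline(&ts, ms);
    err = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
    trace_qemu_mutex_locked(mutex, file, line);
    if (err && err != ETIMEDOUT) {
        error_exit(err, __func__);
    }
    /* false: the deadline passed; true: woken (possibly spuriously) */
    return err != ETIMEDOUT;
}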


>  
> -static void compute_abs_deadline(struct timespec *ts, int ms)
> -{
> -    struct timeval tv;

[1] Oh, you mixed code motion with new code, but the commit message
didn't mention that.  It's not necessarily worth splitting the patch,
but at least mentioning it would be worthwhile.

> +++ b/util/qemu-thread-win32.c
> @@ -145,6 +145,22 @@ void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, con
>      qemu_mutex_post_lock(mutex, file, line);
>  }
>  
> +void qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
> +                              const char *file, const int line)
> +{
> +    int rc = 0;
> +
> +    assert(cond->initialized);
> +    trace_qemu_mutex_unlock(mutex, file, line);
> +    if (!SleepConditionVariableSRW(&cond->var, &mutex->lock, ms, 0)) {
> +        rc = GetLastError();
> +    }
> +    trace_qemu_mutex_locked(mutex, file, line);
> +    if (rc && rc != ERROR_TIMEOUT) {
> +        error_exit(rc, __func__);
> +    }
> +}

I am less certain that this implementation is correct, but on the
surface it seems okay.
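
If the POSIX side grows a bool return as suggested above, this side
could mirror it; hypothetically:

bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line)
{
    int rc = 0;

    assert(cond->initialized);
    trace_qemu_mutex_unlock(mutex, file, line);
    if (!SleepConditionVariableSRW(&cond->var, &mutex->lock, ms, 0)) {
        rc = GetLastError();
    }
    trace_qemu_mutex_locked(mutex, file, line);
    if (rc && rc != ERROR_TIMEOUT) {
        error_exit(rc, __func__);
    }
    return rc != ERROR_TIMEOUT;
}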


>  
> +static void
> +qsp_cond_timedwait(QemuCond *cond, QemuMutex *mutex, int ms,
> +                   const char *file, int line)
> +{
> +    QSPEntry *e;
> +    int64_t t0, t1;
> +
> +    t0 = get_clock();
> +    qemu_cond_timedwait_impl(cond, mutex, ms, file, line);
> +    t1 = get_clock();
> +
> +    e = qsp_entry_get(cond, file, line, QSP_CONDVAR);
> +    qsp_entry_record(e, t1 - t0);
> +}

Another function where a bool or int return (to distinguish success from
timeout) might be worthwhile to some callers.
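
E.g. the profiled wrapper could just pass the result through (again a
sketch, assuming the bool return suggested earlier):

static bool
qsp_cond_timedwait(QemuCond *cond, QemuMutex *mutex, int ms,
                   const char *file, int line)
{
    QSPEntry *e;
    int64_t t0, t1;
    bool woken;

    t0 = get_clock();
    woken = qemu_cond_timedwait_impl(cond, mutex, ms, file, line);
    t1 = get_clock();

    e = qsp_entry_get(cond, file, line, QSP_CONDVAR);
    qsp_entry_record(e, t1 - t0);
    return woken;
}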

Patch

diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index 55d83a907c..d0cd7b9ae0 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -34,6 +34,8 @@  typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
 typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
 typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                  int l);
+typedef void (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
+                                      const char *f, int l);
 
 extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
 extern QemuMutexLockFunc qemu_mutex_lock_func;
@@ -41,6 +43,7 @@  extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
 extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
 extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
 extern QemuCondWaitFunc qemu_cond_wait_func;
+extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
 
 /* convenience macros to bypass the profiler */
 #define qemu_mutex_lock__raw(m)                         \
@@ -63,6 +66,8 @@  extern QemuCondWaitFunc qemu_cond_wait_func;
             qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__);
 #define qemu_cond_wait(c, m)                                            \
             qemu_cond_wait_impl(c, m, __FILE__, __LINE__);
+#define qemu_cond_timedwait(c, m, ms)                                   \
+            qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__);
 #else
 #define qemu_mutex_lock(m) ({                                           \
             QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func);  \
@@ -89,6 +94,11 @@  extern QemuCondWaitFunc qemu_cond_wait_func;
             QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func);    \
             _f(c, m, __FILE__, __LINE__);                               \
         })
+
+#define qemu_cond_timedwait(c, m, ms) ({                                       \
+            QemuCondTimedWaitFunc _f = atomic_read(&qemu_cond_timedwait_func); \
+            _f(c, m, ms, __FILE__, __LINE__);                                  \
+        })
 #endif
 
 #define qemu_mutex_unlock(mutex) \
@@ -134,12 +144,20 @@  void qemu_cond_signal(QemuCond *cond);
 void qemu_cond_broadcast(QemuCond *cond);
 void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                          const char *file, const int line);
+void qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
+                              const char *file, const int line);
 
 static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
 {
     qemu_cond_wait(cond, mutex);
 }
 
+static inline void (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
+                                         int ms)
+{
+    qemu_cond_timedwait(cond, mutex, ms);
+}
+
 void qemu_sem_init(QemuSemaphore *sem, int init);
 void qemu_sem_post(QemuSemaphore *sem);
 void qemu_sem_wait(QemuSemaphore *sem);
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index 1bf5e65dea..eed777d9ec 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -36,6 +36,18 @@  static void error_exit(int err, const char *msg)
     abort();
 }
 
+static void compute_abs_deadline(struct timespec *ts, int ms)
+{
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
+    ts->tv_sec = tv.tv_sec + ms / 1000;
+    if (ts->tv_nsec >= 1000000000) {
+        ts->tv_sec++;
+        ts->tv_nsec -= 1000000000;
+    }
+}
+
 void qemu_mutex_init(QemuMutex *mutex)
 {
     int err;
@@ -164,6 +176,22 @@  void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, con
         error_exit(err, __func__);
 }
 
+void qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
+                              const char *file, const int line)
+{
+    int err;
+    struct timespec ts;
+
+    assert(cond->initialized);
+    trace_qemu_mutex_unlock(mutex, file, line);
+    compute_abs_deadline(&ts, ms);
+    err = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
+    trace_qemu_mutex_locked(mutex, file, line);
+    if (err && err != ETIMEDOUT) {
+        error_exit(err, __func__);
+    }
+}
+
 void qemu_sem_init(QemuSemaphore *sem, int init)
 {
     int rc;
@@ -238,18 +266,6 @@  void qemu_sem_post(QemuSemaphore *sem)
 #endif
 }
 
-static void compute_abs_deadline(struct timespec *ts, int ms)
-{
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
-    ts->tv_sec = tv.tv_sec + ms / 1000;
-    if (ts->tv_nsec >= 1000000000) {
-        ts->tv_sec++;
-        ts->tv_nsec -= 1000000000;
-    }
-}
-
 int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
 {
     int rc;
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index 572f88535d..5faa01cb61 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -145,6 +145,22 @@  void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, con
     qemu_mutex_post_lock(mutex, file, line);
 }
 
+void qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
+                              const char *file, const int line)
+{
+    int rc = 0;
+
+    assert(cond->initialized);
+    trace_qemu_mutex_unlock(mutex, file, line);
+    if (!SleepConditionVariableSRW(&cond->var, &mutex->lock, ms, 0)) {
+        rc = GetLastError();
+    }
+    trace_qemu_mutex_locked(mutex, file, line);
+    if (rc && rc != ERROR_TIMEOUT) {
+        error_exit(rc, __func__);
+    }
+}
+
 void qemu_sem_init(QemuSemaphore *sem, int init)
 {
     /* Manual reset.  */
diff --git a/util/qsp.c b/util/qsp.c
index 5264c97342..904dcb7436 100644
--- a/util/qsp.c
+++ b/util/qsp.c
@@ -131,6 +131,7 @@  QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl;
 QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func =
     qemu_rec_mutex_trylock_impl;
 QemuCondWaitFunc qemu_cond_wait_func = qemu_cond_wait_impl;
+QemuCondTimedWaitFunc qemu_cond_timedwait_func = qemu_cond_timedwait_impl;
 
 /*
  * It pays off to _not_ hash callsite->file; hashing a string is slow, and
@@ -412,6 +413,21 @@  qsp_cond_wait(QemuCond *cond, QemuMutex *mutex, const char *file, int line)
     qsp_entry_record(e, t1 - t0);
 }
 
+static void
+qsp_cond_timedwait(QemuCond *cond, QemuMutex *mutex, int ms,
+                   const char *file, int line)
+{
+    QSPEntry *e;
+    int64_t t0, t1;
+
+    t0 = get_clock();
+    qemu_cond_timedwait_impl(cond, mutex, ms, file, line);
+    t1 = get_clock();
+
+    e = qsp_entry_get(cond, file, line, QSP_CONDVAR);
+    qsp_entry_record(e, t1 - t0);
+}
+
 bool qsp_is_enabled(void)
 {
     return atomic_read(&qemu_mutex_lock_func) == qsp_mutex_lock;
@@ -425,6 +441,7 @@  void qsp_enable(void)
     atomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock);
     atomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock);
     atomic_set(&qemu_cond_wait_func, qsp_cond_wait);
+    atomic_set(&qemu_cond_timedwait_func, qsp_cond_timedwait);
 }
 
 void qsp_disable(void)
@@ -435,6 +452,7 @@  void qsp_disable(void)
     atomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl);
     atomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl);
     atomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl);
+    atomic_set(&qemu_cond_timedwait_func, qemu_cond_timedwait_impl);
 }
 
 static gint qsp_tree_cmp(gconstpointer ap, gconstpointer bp, gpointer up)