diff mbox

[2/2,RFC] Convert Clock Timerlists to RCU V2

Message ID 1393529758-12193-3-git-send-email-ncmike@ncultra.org
State New
Headers show

Commit Message

Mike D. Day Feb. 27, 2014, 7:35 p.m. UTC
timerlists is a list of lists that holds active timers, among other
items. It is read-mostly. This patch converts read access to the
timerlists to use RCU.

Rather than introduce a second mutex for timerlists, which would
require nested mutexes in order to write to the timerlists, use one
QemuMutex in the QemuClock structure for all write access to any list
hanging off the QemuClock structure.

This patch also protects timerlists->active_timers->timer_list by the
new clock mutex for write access and by RCU for read access.

This patch applies against Paolo Bonzini's rcu branch:
https://github.com/bonzini/qemu/tree/rcu and also requires the
previously submitted patch 03fba95 "Convert active timers list to use
RCU for read operations V2."

V2:
- timerlists modifications split to a separate patch (this one).
- Addressed Alex Bligh's comments.

Signed-off-by: Mike Day <ncmike@ncultra.org>
---
 qemu-timer.c | 79 +++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 54 insertions(+), 25 deletions(-)

Comments

Alex Bligh Feb. 28, 2014, 8:05 p.m. UTC | #1
Mike

On 27 Feb 2014, at 19:35, Mike Day wrote:

> timerlists is a list of lists that holds active timers, among other
> items. It is read-mostly. This patch converts read access to the
> timerlists to use RCU.
> 
> Rather than introduce a second mutex for timerlists, which would
> require nested mutexes in order to write to the timerlists, use one
> QemuMutex in the QemuClock structure for all write access to any list
> hanging off the QemuClock structure.

I sort of wonder why you don't just use one Mutex across the whole
of QEMU rather than one per clock.

This would save worrying about whether when you do things like:
  qemu_mutex_lock(&timer_list->clock->clock_mutex);

timer_list 'disappears' as (prior to taking the mutex) it's outside
the rcu read lock. 

> @@ -108,19 +108,24 @@ QEMUTimerList *timerlist_new(QEMUClockType type,
>     timer_list->clock = clock;
>     timer_list->notify_cb = cb;
>     timer_list->notify_opaque = opaque;
> -    qemu_mutex_init(&timer_list->active_timers_lock);
> +    qemu_mutex_init(&clock->clock_mutex);
>     QLIST_INSERT_HEAD(&clock->timerlists, timer_list, list);
>     return timer_list;
> }

That can't be right, you are calling qemu_mutex_init for each
timerlist, but the timerlists share the mutex which is now in the
clock. The mutex should therefore be initialized in qemu_clock_init.

Also, clock_mutex appears never to get destroyed.

> QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list)
> {
> -    return timer_list->clock->type;
> +    return atomic_rcu_read(&timer_list->clock->type);
> }

I don't think this works because of the double indirection.
The '&' means it's not actually dereferencing clock->type, but it
is dereferencing timer_list->clock outside of either an rcu read
lock or an atomic read. I think you'd be better off with
rcu_read_lock() etc. (sadly).

> QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
> @@ -261,11 +272,13 @@ QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
> 
> void timerlist_notify(QEMUTimerList *timer_list)
> {
> -    if (timer_list->notify_cb) {
> +    rcu_read_lock();
> +    if (atomic_rcu_read(&timer_list->notify_cb)) {
>         timer_list->notify_cb(timer_list->notify_opaque);
>     } else {
>         qemu_notify_event();
>     }
> +    rcu_read_unlock();
> }

If you have the rcu_read_lock why do you need the atomic_rcu_read?
And if you need it, why do you not need it on the following line?

However, as any QEMUTimerList can (now) be reclaimed, surely all
callers of this function are going to need to hold the rcu_read_lock
anyway?

I think this is used only by timerlist_rearm and qemu_clock_notify.
Provided these call this function holding the rcu_read_lock
I don't think this function needs changing.

> /* Transition function to convert a nanosecond timeout to ms
> @@ -389,24 +402,30 @@ static void timerlist_rearm(QEMUTimerList *timer_list)
> /* stop a timer, but do not dealloc it */
> void timer_del(QEMUTimer *ts)
> {
> -    QEMUTimerList *timer_list = ts->timer_list;
> +    QEMUTimerList *timer_list;
> 
> -    qemu_mutex_lock(&timer_list->active_timers_lock);
> +    timer_list = atomic_rcu_read(&ts->timer_list);
> +    rcu_read_lock();
> +    qemu_mutex_lock(&timer_list->clock->clock_mutex);
> +    rcu_read_unlock();
>     timer_del_locked(timer_list, ts);
> -    qemu_mutex_unlock(&timer_list->active_timers_lock);
> +    qemu_mutex_unlock(&timer_list->clock->clock_mutex);
> }

Again in my ignorance of RCU I don't know whether taking a mutex
implicitly takes an rcu read lock. If not, that rcu_read_unlock
should be after the mutex unlock.

> /* modify the current timer so that it will be fired when current_time
>> = expire_time. The corresponding callback will be called. */
> void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
> {
> -    QEMUTimerList *timer_list = ts->timer_list;
> +    QEMUTimerList *timer_list;
>     bool rearm;
> 
> -    qemu_mutex_lock(&timer_list->active_timers_lock);
> +    timer_list = atomic_rcu_read(&ts->timer_list);
> +    rcu_read_lock();
> +    qemu_mutex_lock(&timer_list->clock->clock_mutex);
> +    rcu_read_unlock();
>     timer_del_locked(timer_list, ts);
>     rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
> -    qemu_mutex_unlock(&timer_list->active_timers_lock);
> +    qemu_mutex_unlock(&timer_list->clock->clock_mutex);
> 
>     if (rearm) {
>         timerlist_rearm(timer_list);

Ditto

> @@ -418,18 +437,20 @@ void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
>    The corresponding callback will be called. */
> void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
> {
> -    QEMUTimerList *timer_list = ts->timer_list;
> +    QEMUTimerList *timer_list;
>     bool rearm = false;
> 
> -    qemu_mutex_lock(&timer_list->active_timers_lock);
> +    timer_list = atomic_rcu_read(&ts->timer_list);
> +    rcu_read_lock();
> +    qemu_mutex_lock(&timer_list->clock->clock_mutex);
> +    rcu_read_unlock();
>     if (ts->expire_time == -1 || ts->expire_time > expire_time) {
>         if (ts->expire_time != -1) {
>             timer_del_locked(timer_list, ts);
>             rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
>         }
>     }
> -    qemu_mutex_unlock(&timer_list->active_timers_lock);
> -
> +    qemu_mutex_unlock(&timer_list->clock->clock_mutex);
>     if (rearm) {
>         timerlist_rearm(timer_list);
>     }

Ditto

> @@ -447,7 +468,10 @@ void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time)
> 
> bool timer_pending(QEMUTimer *ts)
> {
> -    return ts->expire_time >= 0;
> +    int64_t expire_time;
> +
> +    expire_time = atomic_rcu_read(&ts->expire_time);
> +    return expire_time >= 0;
> }

Why not just

       return atomic_rcu_read(&ts->expire_time) >= 0;
Mike D. Day March 3, 2014, 2:02 p.m. UTC | #2
On Fri, Feb 28, 2014 at 3:05 PM, Alex Bligh <alex@alex.org.uk> wrote:

>> Rather than introduce a second mutex for timerlists, which would
>> require nested mutexes in order to write to the timerlists, use one
>> QemuMutex in the QemuClock structure for all write access to any list
>> hanging off the QemuClock structure.
>
> I sort of wonder why you don't just use one Mutex across the whole
> of QEMU rather than one per clock.
>
> This would save worrying about whether when you do things like:
>   qemu_mutex_lock(&timer_list->clock->clock_mutex);

I like it, it solves another problem I spotted after I submitted the patch.

>
>> @@ -108,19 +108,24 @@ QEMUTimerList *timerlist_new(QEMUClockType type,
>>     timer_list->clock = clock;
>>     timer_list->notify_cb = cb;
>>     timer_list->notify_opaque = opaque;
>> -    qemu_mutex_init(&timer_list->active_timers_lock);
>> +    qemu_mutex_init(&clock->clock_mutex);
>>     QLIST_INSERT_HEAD(&clock->timerlists, timer_list, list);
>>     return timer_list;
>> }
>
> That can't be right, you are calling qemu_mutex_init for each
> timerlist, but the timerlists share the mutex which is now in the
> clock. The mutex should therefore be initialized in qemu_clock_init.

> Also, clock_mutex appears never to get destroyed.

Yes, thank you, on both points.

>
>> QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list)
>> {
>> -    return timer_list->clock->type;
>> +    return atomic_rcu_read(&timer_list->clock->type);
>> }
>
> I don't think this works because of the double indirection.
> The '&' means it's not actually dereferencing clock->type, but it
> is dereferencing timer_list->clock outside of either an rcu read
> lock or an atomic read. I think you'd be better off with
> rcu_read_lock() etc. (sadly).

I should fix this with parenthesis as in &(timer_list->clock->type)

>> QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
>> @@ -261,11 +272,13 @@ QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
>>
>> void timerlist_notify(QEMUTimerList *timer_list)
>> {
>> -    if (timer_list->notify_cb) {
>> +    rcu_read_lock();
>> +    if (atomic_rcu_read(&timer_list->notify_cb)) {
>>         timer_list->notify_cb(timer_list->notify_opaque);
>>     } else {
>>         qemu_notify_event();
>>     }
>> +    rcu_read_unlock();
>> }
>
> If you have the rcu_read_lock why do you need the atomic_rcu_read?
> And if you need it, why do you not need it on the following line?

rcu_read_lock/unlock and atomic_rcu_read/set do different things and
frequently need to be used together. rcu_read_lock/unlock causes the
thread to enter an RCU critical section by getting a counter out of
TLS. atomic_rcu_read/set will use memory barriers to flush caches
(depending on the memory model and platform) and ensure that reads and
writes are ordered. You typically would use atomic_rcu_read on the
first read, thereafter the memory is up-to-date and writes have been
flushed. And you typically use atomic_rcu_set on the last write, when
the data structure is complete. You don't need to use them on every
update. Just for completeness, rcu_read_unlock ends the rcu critical
section but it's usually a no-op.

> However, as any QEMUTimerList can (now) be reclaimed, surely all
> callers of this function are going to need to hold the rcu_read_lock
> anyway?

Yes

> I think this is used only by timerlist_rearm and qemu_clock_notify.
> Provided these call this function holding the rcu_read_lock
> I don't think this function needs changing.
>
>> /* Transition function to convert a nanosecond timeout to ms
>> @@ -389,24 +402,30 @@ static void timerlist_rearm(QEMUTimerList *timer_list)
>> /* stop a timer, but do not dealloc it */
>> void timer_del(QEMUTimer *ts)
>> {
>> -    QEMUTimerList *timer_list = ts->timer_list;
>> +    QEMUTimerList *timer_list;
>>
>> -    qemu_mutex_lock(&timer_list->active_timers_lock);
>> +    timer_list = atomic_rcu_read(&ts->timer_list);
>> +    rcu_read_lock();
>> +    qemu_mutex_lock(&timer_list->clock->clock_mutex);
>> +    rcu_read_unlock();
>>     timer_del_locked(timer_list, ts);
>> -    qemu_mutex_unlock(&timer_list->active_timers_lock);
>> +    qemu_mutex_unlock(&timer_list->clock->clock_mutex);
>> }
>
> Again in my ignorance of RCU I don't know whether taking a mutex
> implicitly takes an rcu read lock. If not, that rcu_read_unlock
> should be after the mutex unlock.

I am going to change this because of a race condition, To answer your
question, holding a mutex does not imply holding an rcu read lock. It
does indicate the potential need for readers to use rcu to read the
list because they can do so when the mutex holder is updating the
list. And, I'm working under the assumption that holding a mutex
implies a memory barrier so there is no need for atomic_rcu_read/set.

>
>> @@ -447,7 +468,10 @@ void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time)
>>
>> bool timer_pending(QEMUTimer *ts)
>> {
>> -    return ts->expire_time >= 0;
>> +    int64_t expire_time;
>> +
>> +    expire_time = atomic_rcu_read(&ts->expire_time);
>> +    return expire_time >= 0;
>> }
>
> Why not just
>
>        return atomic_rcu_read(&ts->expire_time) >= 0;

That's better.

Thanks Alex!
Paolo Bonzini March 3, 2014, 2:14 p.m. UTC | #3
Il 28/02/2014 21:05, Alex Bligh ha scritto:
> Mike
>
> On 27 Feb 2014, at 19:35, Mike Day wrote:
>
>> timerlists is a list of lists that holds active timers, among other
>> items. It is read-mostly. This patch converts read access to the
>> timerlists to use RCU.
>>
>> Rather than introduce a second mutex for timerlists, which would
>> require nested mutexes in order to write to the timerlists, use one
>> QemuMutex in the QemuClock structure for all write access to any list
>> hanging off the QemuClock structure.
>
> I sort of wonder why you don't just use one Mutex across the whole
> of QEMU rather than one per clock.

I think this is not enough; separate iothreads could have separate 
timerlist, and higher-priority iothreads should not be slowed down by 
lower-priority iothreads.

>> QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list)
>> {
>> -    return timer_list->clock->type;
>> +    return atomic_rcu_read(&timer_list->clock->type);
>> }
>
> I don't think this works because of the double indirection.

It doesn't but you can of course do this:

	QEMUClock *clock = atomic_rcu_read(&timer_list->clock);
	return atomic_rcu_read(&clock->type);

> The '&' means it's not actually dereferencing clock->type, but it
> is dereferencing timer_list->clock outside of either an rcu read
> lock or an atomic read. I think you'd be better off with
> rcu_read_lock() etc. (sadly).

You could also assume that the caller does it.  Right now, this function 
has no user, so you just have to document it.

>> QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
>> @@ -261,11 +272,13 @@ QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
>>
>> void timerlist_notify(QEMUTimerList *timer_list)
>> {
>> -    if (timer_list->notify_cb) {
>> +    rcu_read_lock();
>> +    if (atomic_rcu_read(&timer_list->notify_cb)) {
>>         timer_list->notify_cb(timer_list->notify_opaque);
>>     } else {
>>         qemu_notify_event();
>>     }
>> +    rcu_read_unlock();
>> }
>
> If you have the rcu_read_lock why do you need the atomic_rcu_read?
> And if you need it, why do you not need it on the following line?

Read the documentation. :)  It was also posted on the list.

atomic_rcu_read() is only about blocking invalid compiler optimizations; 
it is not required if you are in the *write* side, but it is always 
required in the read side.

However, I agree that it is not needed here.  atomic_rcu_read() is not 
needed when reading a "leaf" element of the data structure.

> However, as any QEMUTimerList can (now) be reclaimed, surely all
> callers of this function are going to need to hold the rcu_read_lock
> anyway?
>
> I think this is used only by timerlist_rearm and qemu_clock_notify.
> Provided these call this function holding the rcu_read_lock
> I don't think this function needs changing.

Yes.

Paolo
diff mbox

Patch

diff --git a/qemu-timer.c b/qemu-timer.c
index dad30a3..e335a7a 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -47,7 +47,7 @@ 
 
 typedef struct QEMUClock {
     QLIST_HEAD(, QEMUTimerList) timerlists;
-
+    QemuMutex clock_mutex;
     NotifierList reset_notifiers;
     int64_t last;
     QEMUClockType type;
@@ -69,12 +69,12 @@  QEMUClock qemu_clocks[QEMU_CLOCK_MAX];
 
 struct QEMUTimerList {
     QEMUClock *clock;
-    QemuMutex active_timers_lock;
     QEMUTimer *active_timers;
     QLIST_ENTRY(QEMUTimerList) list;
     QEMUTimerListNotifyCB *notify_cb;
     void *notify_opaque;
     QemuEvent timers_done_ev;
+    struct rcu_head rcu;
 };
 
 /**
@@ -108,19 +108,24 @@  QEMUTimerList *timerlist_new(QEMUClockType type,
     timer_list->clock = clock;
     timer_list->notify_cb = cb;
     timer_list->notify_opaque = opaque;
-    qemu_mutex_init(&timer_list->active_timers_lock);
+    qemu_mutex_init(&clock->clock_mutex);
     QLIST_INSERT_HEAD(&clock->timerlists, timer_list, list);
     return timer_list;
 }
 
+static void reclaim_timerlist(struct rcu_head *rcu)
+{
+    QEMUTimerList *tl = container_of(rcu, QEMUTimerList, rcu);
+    g_free(tl);
+}
+
 void timerlist_free(QEMUTimerList *timer_list)
 {
     assert(!timerlist_has_timers(timer_list));
     if (timer_list->clock) {
         QLIST_REMOVE(timer_list, list);
     }
-    qemu_mutex_destroy(&timer_list->active_timers_lock);
-    g_free(timer_list);
+    call_rcu1(&timer_list->rcu, reclaim_timerlist);
 }
 
 static void qemu_clock_init(QEMUClockType type)
@@ -144,9 +149,11 @@  void qemu_clock_notify(QEMUClockType type)
 {
     QEMUTimerList *timer_list;
     QEMUClock *clock = qemu_clock_ptr(type);
-    QLIST_FOREACH(timer_list, &clock->timerlists, list) {
+    rcu_read_lock();
+    QLIST_FOREACH_RCU(timer_list, &clock->timerlists, list) {
         timerlist_notify(timer_list);
     }
+    rcu_read_unlock();
 }
 
 void qemu_clock_enable(QEMUClockType type, bool enabled)
@@ -154,13 +161,15 @@  void qemu_clock_enable(QEMUClockType type, bool enabled)
     QEMUClock *clock = qemu_clock_ptr(type);
     QEMUTimerList *tl;
     bool old = clock->enabled;
-    clock->enabled = enabled;
+    atomic_rcu_set(&clock->enabled, enabled);
     if (enabled && !old) {
         qemu_clock_notify(type);
     } else if (!enabled && old) {
-        QLIST_FOREACH(tl, &clock->timerlists, list) {
+        rcu_read_lock();
+        QLIST_FOREACH_RCU(tl, &clock->timerlists, list) {
             qemu_event_wait(&tl->timers_done_ev);
         }
+        rcu_read_unlock();
     }
 }
 
@@ -242,16 +251,18 @@  int64_t qemu_clock_deadline_ns_all(QEMUClockType type)
     int64_t deadline = -1;
     QEMUTimerList *timer_list;
     QEMUClock *clock = qemu_clock_ptr(type);
-    QLIST_FOREACH(timer_list, &clock->timerlists, list) {
+    rcu_read_lock();
+    QLIST_FOREACH_RCU(timer_list, &clock->timerlists, list) {
         deadline = qemu_soonest_timeout(deadline,
                                         timerlist_deadline_ns(timer_list));
     }
+    rcu_read_unlock();
     return deadline;
 }
 
 QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list)
 {
-    return timer_list->clock->type;
+    return atomic_rcu_read(&timer_list->clock->type);
 }
 
 QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
@@ -261,11 +272,13 @@  QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
 
 void timerlist_notify(QEMUTimerList *timer_list)
 {
-    if (timer_list->notify_cb) {
+    rcu_read_lock();
+    if (atomic_rcu_read(&timer_list->notify_cb)) {
         timer_list->notify_cb(timer_list->notify_opaque);
     } else {
         qemu_notify_event();
     }
+    rcu_read_unlock();
 }
 
 /* Transition function to convert a nanosecond timeout to ms
@@ -389,24 +402,30 @@  static void timerlist_rearm(QEMUTimerList *timer_list)
 /* stop a timer, but do not dealloc it */
 void timer_del(QEMUTimer *ts)
 {
-    QEMUTimerList *timer_list = ts->timer_list;
+    QEMUTimerList *timer_list;
 
-    qemu_mutex_lock(&timer_list->active_timers_lock);
+    timer_list = atomic_rcu_read(&ts->timer_list);
+    rcu_read_lock();
+    qemu_mutex_lock(&timer_list->clock->clock_mutex);
+    rcu_read_unlock();
     timer_del_locked(timer_list, ts);
-    qemu_mutex_unlock(&timer_list->active_timers_lock);
+    qemu_mutex_unlock(&timer_list->clock->clock_mutex);
 }
 
 /* modify the current timer so that it will be fired when current_time
    >= expire_time. The corresponding callback will be called. */
 void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
 {
-    QEMUTimerList *timer_list = ts->timer_list;
+    QEMUTimerList *timer_list;
     bool rearm;
 
-    qemu_mutex_lock(&timer_list->active_timers_lock);
+    timer_list = atomic_rcu_read(&ts->timer_list);
+    rcu_read_lock();
+    qemu_mutex_lock(&timer_list->clock->clock_mutex);
+    rcu_read_unlock();
     timer_del_locked(timer_list, ts);
     rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
-    qemu_mutex_unlock(&timer_list->active_timers_lock);
+    qemu_mutex_unlock(&timer_list->clock->clock_mutex);
 
     if (rearm) {
         timerlist_rearm(timer_list);
@@ -418,18 +437,20 @@  void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
    The corresponding callback will be called. */
 void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
 {
-    QEMUTimerList *timer_list = ts->timer_list;
+    QEMUTimerList *timer_list;
     bool rearm = false;
 
-    qemu_mutex_lock(&timer_list->active_timers_lock);
+    timer_list = atomic_rcu_read(&ts->timer_list);
+    rcu_read_lock();
+    qemu_mutex_lock(&timer_list->clock->clock_mutex);
+    rcu_read_unlock();
     if (ts->expire_time == -1 || ts->expire_time > expire_time) {
         if (ts->expire_time != -1) {
             timer_del_locked(timer_list, ts);
             rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
         }
     }
-    qemu_mutex_unlock(&timer_list->active_timers_lock);
-
+    qemu_mutex_unlock(&timer_list->clock->clock_mutex);
     if (rearm) {
         timerlist_rearm(timer_list);
     }
@@ -447,7 +468,10 @@  void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time)
 
 bool timer_pending(QEMUTimer *ts)
 {
-    return ts->expire_time >= 0;
+    int64_t expire_time;
+
+    expire_time = atomic_rcu_read(&ts->expire_time);
+    return expire_time >= 0;
 }
 
 bool timer_expired(QEMUTimer *timer_head, int64_t current_time)
@@ -471,10 +495,10 @@  bool timerlist_run_timers(QEMUTimerList *timer_list)
     qemu_event_reset(&timer_list->timers_done_ev);
     current_time = qemu_clock_get_ns(timer_list->clock->type);
     for(;;) {
-        qemu_mutex_lock(&timer_list->active_timers_lock);
+        qemu_mutex_lock(&timer_list->clock->clock_mutex);
         ts = timer_list->active_timers;
         if (!timer_expired_ns(ts, current_time)) {
-            qemu_mutex_unlock(&timer_list->active_timers_lock);
+            qemu_mutex_unlock(&timer_list->clock->clock_mutex);
             break;
         }
 
@@ -485,7 +509,7 @@  bool timerlist_run_timers(QEMUTimerList *timer_list)
         cb = ts->cb;
         opaque = ts->opaque;
         rcu_read_lock();
-        qemu_mutex_unlock(&timer_list->active_timers_lock);
+        qemu_mutex_unlock(&timer_list->clock->clock_mutex);
         /* run the callback (the timer list can be modified) */
         cb(opaque);
         rcu_read_unlock();
@@ -571,13 +595,18 @@  void qemu_clock_register_reset_notifier(QEMUClockType type,
                                         Notifier *notifier)
 {
     QEMUClock *clock = qemu_clock_ptr(type);
+    qemu_mutex_lock(&clock->clock_mutex);
     notifier_list_add(&clock->reset_notifiers, notifier);
+    qemu_mutex_unlock(&clock->clock_mutex);
 }
 
 void qemu_clock_unregister_reset_notifier(QEMUClockType type,
                                           Notifier *notifier)
 {
+    QEMUClock *clock = qemu_clock_ptr(type);
+    qemu_mutex_lock(&clock->clock_mutex);
     notifier_remove(notifier);
+    qemu_mutex_unlock(&clock->clock_mutex);
 }
 
 void init_clocks(void)