[10/35] enable tail call optimization of qemu_co_mutex_lock

Message ID 20220310124413.1102441-11-pbonzini@redhat.com
State New
Series stackless coroutine backend

Commit Message

Paolo Bonzini March 10, 2022, 12:43 p.m. UTC
Make qemu_co_mutex_lock_slowpath a tail call, so that qemu_co_mutex_lock
does not need to build a stack frame of its own.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 util/qemu-coroutine-lock.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
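
The patch below moves the holder/locks_held bookkeeping into the slow-path helper, so that on the contended path the only thing left in qemu_co_mutex_lock is the call itself. Here is a minimal sketch (not QEMU code) of that pattern; the names Mutex, try_fast_lock and lock_slowpath are made up for illustration, not QEMU APIs:

```c
#include <stdatomic.h>
#include <stdbool.h>

typedef struct Mutex {
    atomic_int locked;   /* 0 = free, 1 = held */
    void *holder;
} Mutex;

static bool try_fast_lock(Mutex *m)
{
    int expected = 0;
    return atomic_compare_exchange_strong(&m->locked, &expected, 1);
}

/* Slow path: the bookkeeping lives here (as the patch does for
 * mutex->holder and locks_held), so the caller has nothing left
 * to do after this returns. */
static void lock_slowpath(Mutex *m, void *self)
{
    /* ...queue the waiter and block/yield until the lock is handed over... */
    m->holder = self;
}

void lock(Mutex *m, void *self)
{
    if (try_fast_lock(m)) {
        /* Uncontended: update state inline. */
        m->holder = self;
        return;
    }
    /* Tail position: no code runs after this call, so the compiler can
     * emit a jump instead of a call and reuse lock()'s stack frame. */
    lock_slowpath(m, self);
}
```

With the bookkeeping done after the call (as in the code being removed), the call cannot be a tail call, because the caller still has work to do and therefore still needs its own frame.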

Patch

diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index d6c0565ba5..048cfcea71 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -231,6 +231,8 @@  static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
 
     qemu_coroutine_yield();
     trace_qemu_co_mutex_lock_return(mutex, self);
+    mutex->holder = self;
+    self->locks_held++;
 }
 
 void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
@@ -266,11 +268,11 @@  retry_fast_path:
         /* Uncontended.  */
         trace_qemu_co_mutex_lock_uncontended(mutex, self);
         mutex->ctx = ctx;
+        mutex->holder = self;
+        self->locks_held++;
     } else {
         qemu_co_mutex_lock_slowpath(ctx, mutex);
     }
-    mutex->holder = self;
-    self->locks_held++;
 }
 
 void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)