diff mbox

[for,2.9,v3,05/10] async: Introduce aio_co_enter and aio_co_enter_if_inactive

Message ID 20170410150542.30376-6-famz@redhat.com
State New
Headers show

Commit Message

Fam Zheng April 10, 2017, 3:05 p.m. UTC
These functions start the coroutine in the specified AioContext.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 include/block/aio.h | 18 ++++++++++++++++++
 util/async.c        | 14 +++++++++++++-
 2 files changed, 31 insertions(+), 1 deletion(-)

Comments

Kevin Wolf April 11, 2017, 9:28 a.m. UTC | #1
Am 10.04.2017 um 17:05 hat Fam Zheng geschrieben:
> They start the coroutine on the specified context.
> 
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  include/block/aio.h | 18 ++++++++++++++++++
>  util/async.c        | 14 +++++++++++++-
>  2 files changed, 31 insertions(+), 1 deletion(-)
> 
> diff --git a/include/block/aio.h b/include/block/aio.h
> index 677b6ff..b0a6bb3 100644
> --- a/include/block/aio.h
> +++ b/include/block/aio.h
> @@ -511,6 +511,24 @@ void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
>  void aio_co_wake(struct Coroutine *co);
>  
>  /**
> + * aio_co_enter:
> + * @ctx: the context to run the coroutine
> + * @co: the coroutine to run
> + *
> + * Enter a coroutine in the specified AioContext.
> + */
> +void aio_co_enter(AioContext *ctx, struct Coroutine *co);
> +
> +/**
> + * aio_co_enter_if_inactive:
> + * @ctx: the context to run the coroutine
> + * @co: the coroutine to run
> + *
> + * Enter a coroutine in the specified AioContext, if it's not already entered.
> + */
> +void aio_co_enter_if_inactive(AioContext *ctx, struct Coroutine *co);
> +
> +/**
>   * Return the AioContext whose event loop runs in the current thread.
>   *
>   * If called from an IOThread this will be the IOThread's AioContext.  If
> diff --git a/util/async.c b/util/async.c
> index 663e297..507671a 100644
> --- a/util/async.c
> +++ b/util/async.c
> @@ -453,6 +453,11 @@ void aio_co_wake(struct Coroutine *co)
>      smp_read_barrier_depends();
>      ctx = atomic_read(&co->ctx);
>  
> +    aio_co_enter(ctx, co);
> +}
> +
> +void aio_co_enter(AioContext *ctx, struct Coroutine *co)
> +{
>      if (ctx != qemu_get_current_aio_context()) {
>          aio_co_schedule(ctx, co);
>          return;
> @@ -464,11 +469,18 @@ void aio_co_wake(struct Coroutine *co)
>          QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
>      } else {
>          aio_context_acquire(ctx);
> -        qemu_coroutine_enter(co);
> +        qemu_aio_coroutine_enter(ctx, co);
>          aio_context_release(ctx);
>      }
>  }
>  
> +void aio_co_enter_if_inactive(AioContext *ctx, struct Coroutine *co)
> +{
> +    if (!qemu_coroutine_entered(co)) {
> +        aio_co_enter(ctx, co);
> +    }
> +}

Is this a useful function, though?

I think the only interesting case is the first qemu_coroutine_enter()
after a coroutine is created, here we may want it to run in a different
AioContext than the caller. However, once this has happened, it is
already running in the right AioContext and we can use the normal
functions without giving an explicit AioContext (except in cases where
we wouldn't reenter from a callback of that AioContext, but do such
cases even exist?)

So I expect that some patches down the series, we get a patch that
converts more than is actually necessary. Let's see.

Kevin
Fam Zheng April 11, 2017, 11:07 a.m. UTC | #2
On Tue, 04/11 11:28, Kevin Wolf wrote:
> Am 10.04.2017 um 17:05 hat Fam Zheng geschrieben:
> > They start the coroutine on the specified context.
> > 
> > Signed-off-by: Fam Zheng <famz@redhat.com>
> > ---
> >  include/block/aio.h | 18 ++++++++++++++++++
> >  util/async.c        | 14 +++++++++++++-
> >  2 files changed, 31 insertions(+), 1 deletion(-)
> > 
> > diff --git a/include/block/aio.h b/include/block/aio.h
> > index 677b6ff..b0a6bb3 100644
> > --- a/include/block/aio.h
> > +++ b/include/block/aio.h
> > @@ -511,6 +511,24 @@ void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
> >  void aio_co_wake(struct Coroutine *co);
> >  
> >  /**
> > + * aio_co_enter:
> > + * @ctx: the context to run the coroutine
> > + * @co: the coroutine to run
> > + *
> > + * Enter a coroutine in the specified AioContext.
> > + */
> > +void aio_co_enter(AioContext *ctx, struct Coroutine *co);
> > +
> > +/**
> > + * aio_co_enter_if_inactive:
> > + * @ctx: the context to run the coroutine
> > + * @co: the coroutine to run
> > + *
> > + * Enter a coroutine in the specified AioContext, if it's not already entered.
> > + */
> > +void aio_co_enter_if_inactive(AioContext *ctx, struct Coroutine *co);
> > +
> > +/**
> >   * Return the AioContext whose event loop runs in the current thread.
> >   *
> >   * If called from an IOThread this will be the IOThread's AioContext.  If
> > diff --git a/util/async.c b/util/async.c
> > index 663e297..507671a 100644
> > --- a/util/async.c
> > +++ b/util/async.c
> > @@ -453,6 +453,11 @@ void aio_co_wake(struct Coroutine *co)
> >      smp_read_barrier_depends();
> >      ctx = atomic_read(&co->ctx);
> >  
> > +    aio_co_enter(ctx, co);
> > +}
> > +
> > +void aio_co_enter(AioContext *ctx, struct Coroutine *co)
> > +{
> >      if (ctx != qemu_get_current_aio_context()) {
> >          aio_co_schedule(ctx, co);
> >          return;
> > @@ -464,11 +469,18 @@ void aio_co_wake(struct Coroutine *co)
> >          QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
> >      } else {
> >          aio_context_acquire(ctx);
> > -        qemu_coroutine_enter(co);
> > +        qemu_aio_coroutine_enter(ctx, co);
> >          aio_context_release(ctx);
> >      }
> >  }
> >  
> > +void aio_co_enter_if_inactive(AioContext *ctx, struct Coroutine *co)
> > +{
> > +    if (!qemu_coroutine_entered(co)) {
> > +        aio_co_enter(ctx, co);
> > +    }
> > +}
> 
> Is this a useful function, though?
> 
> I think the only interesting case is the first qemu_coroutine_enter()
> after a coroutine is created, here we may want it to run in a different
> AioContext than the caller. However, once this has happened, it is
> already running in the right AioContext and we can use the normal
> functions without giving an explicit AioContext (except in cases where
> we wouldn't reenter from a callback of that AioContext, but do such
> cases even exist?)
> 
> So I expect that some patches down the series, we get a patch that
> converts more than is actually necessary. Let's see.
> 
> Kevin

You are right, actually this function is unused.

Fam
diff mbox

Patch

diff --git a/include/block/aio.h b/include/block/aio.h
index 677b6ff..b0a6bb3 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -511,6 +511,24 @@  void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
 void aio_co_wake(struct Coroutine *co);
 
 /**
+ * aio_co_enter:
+ * @ctx: the context to run the coroutine
+ * @co: the coroutine to run
+ *
+ * Enter a coroutine in the specified AioContext.
+ */
+void aio_co_enter(AioContext *ctx, struct Coroutine *co);
+
+/**
+ * aio_co_enter_if_inactive:
+ * @ctx: the context to run the coroutine
+ * @co: the coroutine to run
+ *
+ * Enter a coroutine in the specified AioContext, if it's not already entered.
+ */
+void aio_co_enter_if_inactive(AioContext *ctx, struct Coroutine *co);
+
+/**
  * Return the AioContext whose event loop runs in the current thread.
  *
  * If called from an IOThread this will be the IOThread's AioContext.  If
diff --git a/util/async.c b/util/async.c
index 663e297..507671a 100644
--- a/util/async.c
+++ b/util/async.c
@@ -453,6 +453,11 @@  void aio_co_wake(struct Coroutine *co)
     smp_read_barrier_depends();
     ctx = atomic_read(&co->ctx);
 
+    aio_co_enter(ctx, co);
+}
+
+void aio_co_enter(AioContext *ctx, struct Coroutine *co)
+{
     if (ctx != qemu_get_current_aio_context()) {
         aio_co_schedule(ctx, co);
         return;
@@ -464,11 +469,18 @@  void aio_co_wake(struct Coroutine *co)
         QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
     } else {
         aio_context_acquire(ctx);
-        qemu_coroutine_enter(co);
+        qemu_aio_coroutine_enter(ctx, co);
         aio_context_release(ctx);
     }
 }
 
+void aio_co_enter_if_inactive(AioContext *ctx, struct Coroutine *co)
+{
+    if (!qemu_coroutine_entered(co)) {
+        aio_co_enter(ctx, co);
+    }
+}
+
 void aio_context_ref(AioContext *ctx)
 {
     g_source_ref(&ctx->source);