[2/3] AioContext: acquire/release AioContext during aio_poll

Message ID 1424449612-18215-3-git-send-email-pbonzini@redhat.com
State New

Commit Message

Paolo Bonzini Feb. 20, 2015, 4:26 p.m. UTC
This is the first step in pushing down acquire/release, and will let
rfifolock drop the contention callback feature.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 aio-posix.c         |  9 +++++++++
 aio-win32.c         |  8 ++++++++
 include/block/aio.h | 15 ++++++++-------
 3 files changed, 25 insertions(+), 7 deletions(-)

Comments

Fam Zheng Feb. 25, 2015, 5:45 a.m. UTC | #1
On Fri, 02/20 17:26, Paolo Bonzini wrote:
> This is the first step in pushing down acquire/release, and will let
> rfifolock drop the contention callback feature.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  aio-posix.c         |  9 +++++++++
>  aio-win32.c         |  8 ++++++++
>  include/block/aio.h | 15 ++++++++-------
>  3 files changed, 25 insertions(+), 7 deletions(-)
> 
> diff --git a/aio-posix.c b/aio-posix.c
> index 4a30b77..292ae84 100644
> --- a/aio-posix.c
> +++ b/aio-posix.c
> @@ -238,6 +238,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      bool progress;
>      int64_t timeout;
>  
> +    aio_context_acquire(ctx);
>      was_dispatching = ctx->dispatching;
>      progress = false;
>  
> @@ -267,7 +268,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      timeout = blocking ? aio_compute_timeout(ctx) : 0;
>  
>      /* wait until next event */
> +    if (timeout) {
> +        aio_context_release(ctx);
> +    }
>      ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
> +    if (timeout) {
> +        aio_context_acquire(ctx);
> +    }
>  
>      /* if we have any readable fds, dispatch event */
>      if (ret > 0) {
> @@ -285,5 +292,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      }
>  
>      aio_set_dispatching(ctx, was_dispatching);
> +    aio_context_release(ctx);
> +
>      return progress;
>  }
> diff --git a/aio-win32.c b/aio-win32.c
> index e6f4ced..233d8f5 100644
> --- a/aio-win32.c
> +++ b/aio-win32.c
> @@ -283,6 +283,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      int count;
>      int timeout;
>  
> +    aio_context_acquire(ctx);
>      have_select_revents = aio_prepare(ctx);
>      if (have_select_revents) {
>          blocking = false;
> @@ -323,7 +324,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>  
>          timeout = blocking
>              ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
> +        if (timeout) {
> +            aio_context_release(ctx);

Why are the unlock/lock pairs around poll conditional?

Fam

> +        }
>          ret = WaitForMultipleObjects(count, events, FALSE, timeout);
> +        if (timeout) {
> +            aio_context_acquire(ctx);
> +        }
>          aio_set_dispatching(ctx, true);
>  
>          if (first && aio_bh_poll(ctx)) {

Paolo Bonzini Feb. 26, 2015, 1:21 p.m. UTC | #2
On 25/02/2015 06:45, Fam Zheng wrote:
> On Fri, 02/20 17:26, Paolo Bonzini wrote:
>> This is the first step in pushing down acquire/release, and will let
>> rfifolock drop the contention callback feature.
>>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> ---
>>  aio-posix.c         |  9 +++++++++
>>  aio-win32.c         |  8 ++++++++
>>  include/block/aio.h | 15 ++++++++-------
>>  3 files changed, 25 insertions(+), 7 deletions(-)
>>
>> diff --git a/aio-posix.c b/aio-posix.c
>> index 4a30b77..292ae84 100644
>> --- a/aio-posix.c
>> +++ b/aio-posix.c
>> @@ -238,6 +238,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>>      bool progress;
>>      int64_t timeout;
>>  
>> +    aio_context_acquire(ctx);
>>      was_dispatching = ctx->dispatching;
>>      progress = false;
>>  
>> @@ -267,7 +268,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>>      timeout = blocking ? aio_compute_timeout(ctx) : 0;
>>  
>>      /* wait until next event */
>> +    if (timeout) {
>> +        aio_context_release(ctx);
>> +    }
>>      ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
>> +    if (timeout) {
>> +        aio_context_acquire(ctx);
>> +    }
>>  
>>      /* if we have any readable fds, dispatch event */
>>      if (ret > 0) {
>> @@ -285,5 +292,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>>      }
>>  
>>      aio_set_dispatching(ctx, was_dispatching);
>> +    aio_context_release(ctx);
>> +
>>      return progress;
>>  }
>> diff --git a/aio-win32.c b/aio-win32.c
>> index e6f4ced..233d8f5 100644
>> --- a/aio-win32.c
>> +++ b/aio-win32.c
>> @@ -283,6 +283,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>>      int count;
>>      int timeout;
>>  
>> +    aio_context_acquire(ctx);
>>      have_select_revents = aio_prepare(ctx);
>>      if (have_select_revents) {
>>          blocking = false;
>> @@ -323,7 +324,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>>  
>>          timeout = blocking
>>              ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
>> +        if (timeout) {
>> +            aio_context_release(ctx);
> 
> Why are the unlock/lock pairs around poll conditional?

Both iothread.c and os_host_main_loop_wait are doing it; IIRC it was
measurably faster.

In particular, iothread.c was completely avoiding acquire/release around
non-blocking aio_poll.  This patch does not have exactly the same behavior,
but it tries to stay close:

-        aio_context_acquire(iothread->ctx);
-        blocking = true;
-        while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) {
-            /* Progress was made, keep going */
-            blocking = false;
-        }
-        aio_context_release(iothread->ctx);

The exact same behavior could easily be implemented directly in aio_poll;
for this RFC I'm keeping the code a little simpler.
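
For reference, the shape of the optimization as a standalone pthreads
sketch (ctx_lock and the placeholder comments are illustrative; this is
not QEMU code):

#include <poll.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

static bool poll_once(struct pollfd *fds, int nfds, int timeout_ms)
{
    int ret;

    pthread_mutex_lock(&ctx_lock);
    /* ... fill in fds under the lock ... */

    if (timeout_ms) {
        /* The poll can block (timeout is -1 or positive): drop the
         * lock so other threads can acquire it while we sleep. */
        pthread_mutex_unlock(&ctx_lock);
    }
    ret = poll(fds, nfds, timeout_ms);
    if (timeout_ms) {
        pthread_mutex_lock(&ctx_lock);
    }

    /* ... dispatch ready fds under the lock ... */
    pthread_mutex_unlock(&ctx_lock);
    return ret > 0;
}

When the timeout is 0 the poll cannot block, so the unlock/lock pair
would only add two wasted lock operations to the non-blocking fast path.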

Paolo

> 
> Fam
> 
>> +        }
>>          ret = WaitForMultipleObjects(count, events, FALSE, timeout);
>> +        if (timeout) {
>> +            aio_context_acquire(ctx);
>> +        }
>>          aio_set_dispatching(ctx, true);
>>  
>>          if (first && aio_bh_poll(ctx)) {

Stefan Hajnoczi March 6, 2015, 5:15 p.m. UTC | #3
On Fri, Feb 20, 2015 at 05:26:51PM +0100, Paolo Bonzini wrote:
> This is the first step in pushing down acquire/release, and will let
> rfifolock drop the contention callback feature.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  aio-posix.c         |  9 +++++++++
>  aio-win32.c         |  8 ++++++++
>  include/block/aio.h | 15 ++++++++-------
>  3 files changed, 25 insertions(+), 7 deletions(-)

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

Fam Zheng July 8, 2015, 2:18 a.m. UTC | #4
On Fri, 02/20 17:26, Paolo Bonzini wrote:
> This is the first step in pushing down acquire/release, and will let
> rfifolock drop the contention callback feature.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  aio-posix.c         |  9 +++++++++
>  aio-win32.c         |  8 ++++++++
>  include/block/aio.h | 15 ++++++++-------
>  3 files changed, 25 insertions(+), 7 deletions(-)
> 
> diff --git a/aio-posix.c b/aio-posix.c
> index 4a30b77..292ae84 100644
> --- a/aio-posix.c
> +++ b/aio-posix.c
> @@ -238,6 +238,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      bool progress;
>      int64_t timeout;
>  
> +    aio_context_acquire(ctx);
>      was_dispatching = ctx->dispatching;
>      progress = false;
>  
> @@ -267,7 +268,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      timeout = blocking ? aio_compute_timeout(ctx) : 0;
>  
>      /* wait until next event */
> +    if (timeout) {
> +        aio_context_release(ctx);
> +    }
>      ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);

If two threads poll concurrently on this ctx, they will get the same set of
events.  Is that safe?  Doesn't that lead to double dispatch?

Fam

> +    if (timeout) {
> +        aio_context_acquire(ctx);
> +    }
>  
>      /* if we have any readable fds, dispatch event */
>      if (ret > 0) {
> @@ -285,5 +292,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      }
>  
>      aio_set_dispatching(ctx, was_dispatching);
> +    aio_context_release(ctx);
> +
>      return progress;
>  }
> diff --git a/aio-win32.c b/aio-win32.c
> index e6f4ced..233d8f5 100644
> --- a/aio-win32.c
> +++ b/aio-win32.c
> @@ -283,6 +283,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      int count;
>      int timeout;
>  
> +    aio_context_acquire(ctx);
>      have_select_revents = aio_prepare(ctx);
>      if (have_select_revents) {
>          blocking = false;
> @@ -323,7 +324,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>  
>          timeout = blocking
>              ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
> +        if (timeout) {
> +            aio_context_release(ctx);
> +        }
>          ret = WaitForMultipleObjects(count, events, FALSE, timeout);
> +        if (timeout) {
> +            aio_context_acquire(ctx);
> +        }
>          aio_set_dispatching(ctx, true);
>  
>          if (first && aio_bh_poll(ctx)) {
> @@ -349,5 +356,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      progress |= timerlistgroup_run_timers(&ctx->tlg);
>  
>      aio_set_dispatching(ctx, was_dispatching);
> +    aio_context_release(ctx);
>      return progress;
>  }
> diff --git a/include/block/aio.h b/include/block/aio.h
> index 499efd0..e77409d 100644
> --- a/include/block/aio.h
> +++ b/include/block/aio.h
> @@ -118,13 +118,14 @@ void aio_context_ref(AioContext *ctx);
>  void aio_context_unref(AioContext *ctx);
>  
>  /* Take ownership of the AioContext.  If the AioContext will be shared between
> - * threads, a thread must have ownership when calling aio_poll().
> - *
> - * Note that multiple threads calling aio_poll() means timers, BHs, and
> - * callbacks may be invoked from a different thread than they were registered
> - * from.  Therefore, code must use AioContext acquire/release or use
> - * fine-grained synchronization to protect shared state if other threads will
> - * be accessing it simultaneously.
> + * threads, and a thread does not want to be interrupted, it will have to
> + * take ownership around calls to aio_poll().  Otherwise, aio_poll()
> + * automatically takes care of calling aio_context_acquire and
> + * aio_context_release.
> + *
> + * Access to timers and BHs from a thread that has not acquired AioContext
> + * is possible.  Access to callbacks for now must be done while the AioContext
> + * is owned by the thread (FIXME).
>   */
>  void aio_context_acquire(AioContext *ctx);
>  
> -- 
> 2.3.0
> 
>

Paolo Bonzini July 8, 2015, 7:52 a.m. UTC | #5
On 08/07/2015 04:18, Fam Zheng wrote:
>> > @@ -267,7 +268,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>> >      timeout = blocking ? aio_compute_timeout(ctx) : 0;
>> >  
>> >      /* wait until next event */
>> > +    if (timeout) {
>> > +        aio_context_release(ctx);
>> > +    }
>> >      ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
> If two threads poll concurrently on this ctx, they will get the same set of
> events.  Is that safe?  Doesn't that lead to double dispatch?

Yes, but handlers should be okay with spurious wakeups.  They will just
get EAGAIN.
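
For example, a read handler on a non-blocking fd tolerates that
naturally (an illustrative sketch, not a handler from the tree):

#include <errno.h>
#include <unistd.h>

static void event_read_handler(int fd)
{
    char buf[512];

    for (;;) {
        ssize_t len = read(fd, buf, sizeof(buf));
        if (len < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
            /* Another thread already drained the fd: this was a
             * spurious wakeup, so there is nothing to do. */
            return;
        }
        if (len <= 0) {
            return;  /* EOF or a real error */
        }
        /* ... process len bytes ... */
    }
}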

Paolo

Patch

diff --git a/aio-posix.c b/aio-posix.c
index 4a30b77..292ae84 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -238,6 +238,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     bool progress;
     int64_t timeout;
 
+    aio_context_acquire(ctx);
     was_dispatching = ctx->dispatching;
     progress = false;
 
@@ -267,7 +268,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
     timeout = blocking ? aio_compute_timeout(ctx) : 0;
 
     /* wait until next event */
+    if (timeout) {
+        aio_context_release(ctx);
+    }
     ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
+    if (timeout) {
+        aio_context_acquire(ctx);
+    }
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
@@ -285,5 +292,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     aio_set_dispatching(ctx, was_dispatching);
+    aio_context_release(ctx);
+
     return progress;
 }
diff --git a/aio-win32.c b/aio-win32.c
index e6f4ced..233d8f5 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -283,6 +283,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     int count;
     int timeout;
 
+    aio_context_acquire(ctx);
     have_select_revents = aio_prepare(ctx);
     if (have_select_revents) {
         blocking = false;
@@ -323,7 +324,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
         timeout = blocking
             ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
+        if (timeout) {
+            aio_context_release(ctx);
+        }
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+        if (timeout) {
+            aio_context_acquire(ctx);
+        }
         aio_set_dispatching(ctx, true);
 
         if (first && aio_bh_poll(ctx)) {
@@ -349,5 +356,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
     progress |= timerlistgroup_run_timers(&ctx->tlg);
 
     aio_set_dispatching(ctx, was_dispatching);
+    aio_context_release(ctx);
     return progress;
 }
diff --git a/include/block/aio.h b/include/block/aio.h
index 499efd0..e77409d 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -118,13 +118,14 @@ void aio_context_ref(AioContext *ctx);
 void aio_context_unref(AioContext *ctx);
 
 /* Take ownership of the AioContext.  If the AioContext will be shared between
- * threads, a thread must have ownership when calling aio_poll().
- *
- * Note that multiple threads calling aio_poll() means timers, BHs, and
- * callbacks may be invoked from a different thread than they were registered
- * from.  Therefore, code must use AioContext acquire/release or use
- * fine-grained synchronization to protect shared state if other threads will
- * be accessing it simultaneously.
+ * threads, and a thread does not want to be interrupted, it will have to
+ * take ownership around calls to aio_poll().  Otherwise, aio_poll()
+ * automatically takes care of calling aio_context_acquire and
+ * aio_context_release.
+ *
+ * Access to timers and BHs from a thread that has not acquired AioContext
+ * is possible.  Access to callbacks for now must be done while the AioContext
+ * is owned by the thread (FIXME).
  */
 void aio_context_acquire(AioContext *ctx);
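
As a usage illustration of the revised contract (hypothetical callers;
this assumes aio_context_acquire() nests, which the recursive RFifoLock
beneath it allows):

#include <stdbool.h>
#include "block/aio.h"

/* A thread that does not want other threads' callbacks to run between
 * its iterations keeps ownership across aio_poll(): */
static void run_until_done(AioContext *ctx, bool *done)
{
    aio_context_acquire(ctx);
    while (!*done) {
        aio_poll(ctx, true);    /* acquires again, recursively */
    }
    aio_context_release(ctx);
}

/* A thread with no such requirement holds nothing itself; aio_poll()
 * now does the acquire/release internally: */
static void run_once(AioContext *ctx)
{
    aio_poll(ctx, true);
}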