[05/12] aio: introduce aio_{disable, enable}_clients

Message ID 1444369526-2227-6-git-send-email-famz@redhat.com
State New

Commit Message

Fam Zheng Oct. 9, 2015, 5:45 a.m. UTC
Signed-off-by: Fam Zheng <famz@redhat.com>
---
 aio-posix.c         |  3 ++-
 aio-win32.c         |  3 ++-
 async.c             | 42 ++++++++++++++++++++++++++++++++++++++++++
 include/block/aio.h | 30 ++++++++++++++++++++++++++++++
 4 files changed, 76 insertions(+), 2 deletions(-)

Comments

Fam Zheng Oct. 9, 2015, 4:27 p.m. UTC | #1
On Fri, 10/09 16:31, Kevin Wolf wrote:
> Am 09.10.2015 um 07:45 hat Fam Zheng geschrieben:
> > Signed-off-by: Fam Zheng <famz@redhat.com>
> > ---
> >  aio-posix.c         |  3 ++-
> >  aio-win32.c         |  3 ++-
> >  async.c             | 42 ++++++++++++++++++++++++++++++++++++++++++
> >  include/block/aio.h | 30 ++++++++++++++++++++++++++++++
> >  4 files changed, 76 insertions(+), 2 deletions(-)
> > 
> > diff --git a/aio-posix.c b/aio-posix.c
> > index d25fcfc..a261892 100644
> > --- a/aio-posix.c
> > +++ b/aio-posix.c
> > @@ -261,7 +261,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
> >  
> >      /* fill pollfds */
> >      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> > -        if (!node->deleted && node->pfd.events) {
> > +        if (!node->deleted && node->pfd.events
> > +            && !aio_type_disabled(ctx, node->type)) {
> >              add_pollfd(node);
> >          }
> >      }
> > diff --git a/aio-win32.c b/aio-win32.c
> > index f5ecf57..66cff60 100644
> > --- a/aio-win32.c
> > +++ b/aio-win32.c
> > @@ -309,7 +309,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
> >      /* fill fd sets */
> >      count = 0;
> >      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> > -        if (!node->deleted && node->io_notify) {
> > +        if (!node->deleted && node->io_notify
> > +            && !aio_type_disabled(ctx, node->type)) {
> >              events[count++] = event_notifier_get_handle(node->e);
> >          }
> >      }
> > diff --git a/async.c b/async.c
> > index 244bf79..855b9d5 100644
> > --- a/async.c
> > +++ b/async.c
> > @@ -361,3 +361,45 @@ void aio_context_release(AioContext *ctx)
> >  {
> >      rfifolock_unlock(&ctx->lock);
> >  }
> > +
> > +bool aio_type_disabled(AioContext *ctx, int type)
> > +{
> > +    int i = 1;
> > +    int n = 0;
> > +
> > +    while (type) {
> > +        bool b = type & 0x1;
> > +        type >>= 1;
> > +        n++;
> 
> Any specific reason for leaving client_disable_counters[0] unused?

No, I should have started from 0.

> 
> > +        i <<= 1;
> 
> i is never read.
> 
> > +        if (!b) {
> > +            continue;
> > +        }
> > +        if (ctx->client_disable_counters[n]) {
> > +            return true;
> > +        }
> > +    }
> > +    return false;
> > +}
> 
> In general I wonder whether this function really needs to take a mask
> with possibly multiple set bits instead of just a single type.

Previous versions used to have more types than "internal" and "external", so it
has been a mask. So yes, I think a single type will be better now.
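
Something along these lines, as a rough, untested sketch for the next version
(counters indexed from bit 0, the unused variable dropped, and the mask
argument kept only for illustration):

bool aio_type_disabled(AioContext *ctx, int type)
{
    int n = 0;

    while (type) {
        /* the counter for client bit n lives at index n, starting from 0 */
        if ((type & 1) && ctx->client_disable_counters[n]) {
            return true;
        }
        type >>= 1;
        n++;
    }
    return false;
}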

> 
> > +void aio_disable_enable_clients(AioContext *ctx, int clients_mask,
> > +                                bool is_disable)
> > +{
> > +    int i = 1;
> > +    int n = 0;
> > +    aio_context_acquire(ctx);
> > +
> > +    while (clients_mask) {
> > +        bool b = clients_mask & 0x1;
> > +        clients_mask >>= 1;
> > +        n++;
> > +        i <<= 1;
> 
> This i isn't used either.
> 
> > +        if (!b) {
> > +            continue;
> > +        }
> > +        if (ctx->client_disable_counters[n]) {
> > +            return true;
> > +        }
> 
> Wait, why are you checking the state instead of setting it?

Oops, apparently I screwed up my workspaces, as I do remember coding this
assignment. And I must have used the wrong command when building the tree, so
I didn't even catch the compile error. :(
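
What the loop was meant to do is bump or drop the per-client counter; roughly
(untested sketch, again indexed from bit 0):

void aio_disable_enable_clients(AioContext *ctx, int clients_mask,
                                bool is_disable)
{
    int n = 0;

    aio_context_acquire(ctx);
    while (clients_mask) {
        if (clients_mask & 1) {
            /* the assignment that went missing: count nested disables */
            ctx->client_disable_counters[n] += is_disable ? 1 : -1;
            assert(ctx->client_disable_counters[n] >= 0);
        }
        clients_mask >>= 1;
        n++;
    }
    aio_context_release(ctx);
}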

> 
> How did you test this series?

So far only smoke testing and qemu-iotests, because I don't have a good idea of
how to verify the transaction's atomicity. Any suggestions?

> 
> > +    }
> > +    aio_context_release(ctx);
> > +}
> > diff --git a/include/block/aio.h b/include/block/aio.h
> > index 60e796b..b687152 100644
> > --- a/include/block/aio.h
> > +++ b/include/block/aio.h
> > @@ -122,6 +122,8 @@ struct AioContext {
> >  
> >      /* TimerLists for calling timers - one per clock type */
> >      QEMUTimerListGroup tlg;
> > +
> > +    int client_disable_counters[sizeof(int)];
> >  };
> 
> sizeof return the size in bytes. I think you mean bits here?

You're right, will fix it.
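
I.e. the array presumably needs to be sized in bits, something like (with
CHAR_BIT from <limits.h>):

/* one disable counter per possible client bit in the int-sized mask */
int client_disable_counters[sizeof(int) * CHAR_BIT];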

Fam
Kevin Wolf Oct. 12, 2015, 8:31 a.m. UTC | #2
Am 09.10.2015 um 18:27 hat Fam Zheng geschrieben:
> On Fri, 10/09 16:31, Kevin Wolf wrote:
> > Am 09.10.2015 um 07:45 hat Fam Zheng geschrieben:
> > > Signed-off-by: Fam Zheng <famz@redhat.com>
> > > ---
> > >  aio-posix.c         |  3 ++-
> > >  aio-win32.c         |  3 ++-
> > >  async.c             | 42 ++++++++++++++++++++++++++++++++++++++++++
> > >  include/block/aio.h | 30 ++++++++++++++++++++++++++++++
> > >  4 files changed, 76 insertions(+), 2 deletions(-)
> > > 
> > > diff --git a/aio-posix.c b/aio-posix.c
> > > index d25fcfc..a261892 100644
> > > --- a/aio-posix.c
> > > +++ b/aio-posix.c
> > > @@ -261,7 +261,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
> > >  
> > >      /* fill pollfds */
> > >      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> > > -        if (!node->deleted && node->pfd.events) {
> > > +        if (!node->deleted && node->pfd.events
> > > +            && !aio_type_disabled(ctx, node->type)) {
> > >              add_pollfd(node);
> > >          }
> > >      }
> > > diff --git a/aio-win32.c b/aio-win32.c
> > > index f5ecf57..66cff60 100644
> > > --- a/aio-win32.c
> > > +++ b/aio-win32.c
> > > @@ -309,7 +309,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
> > >      /* fill fd sets */
> > >      count = 0;
> > >      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> > > -        if (!node->deleted && node->io_notify) {
> > > +        if (!node->deleted && node->io_notify
> > > +            && !aio_type_disabled(ctx, node->type)) {
> > >              events[count++] = event_notifier_get_handle(node->e);
> > >          }
> > >      }
> > > diff --git a/async.c b/async.c
> > > index 244bf79..855b9d5 100644
> > > --- a/async.c
> > > +++ b/async.c
> > > @@ -361,3 +361,45 @@ void aio_context_release(AioContext *ctx)
> > >  {
> > >      rfifolock_unlock(&ctx->lock);
> > >  }
> > > +
> > > +bool aio_type_disabled(AioContext *ctx, int type)
> > > +{
> > > +    int i = 1;
> > > +    int n = 0;
> > > +
> > > +    while (type) {
> > > +        bool b = type & 0x1;
> > > +        type >>= 1;
> > > +        n++;
> > 
> > Any specific reason for leaving client_disable_counters[0] unused?
> 
> No, I should have started from 0.
> 
> > 
> > > +        i <<= 1;
> > 
> > i is never read.
> > 
> > > +        if (!b) {
> > > +            continue;
> > > +        }
> > > +        if (ctx->client_disable_counters[n]) {
> > > +            return true;
> > > +        }
> > > +    }
> > > +    return false;
> > > +}
> > 
> > In general I wonder whether this function really needs to take a mask
> > with possibly multiple set bits instead of just a single type.
> 
> Previous versions used to have more types than "internal" and "external", so it
> has been a mask. So yes, I think a single type will be better now.
> 
> > 
> > > +void aio_disable_enable_clients(AioContext *ctx, int clients_mask,
> > > +                                bool is_disable)
> > > +{
> > > +    int i = 1;
> > > +    int n = 0;
> > > +    aio_context_acquire(ctx);
> > > +
> > > +    while (clients_mask) {
> > > +        bool b = clients_mask & 0x1;
> > > +        clients_mask >>= 1;
> > > +        n++;
> > > +        i <<= 1;
> > 
> > This i isn't used either.
> > 
> > > +        if (!b) {
> > > +            continue;
> > > +        }
> > > +        if (ctx->client_disable_counters[n]) {
> > > +            return true;
> > > +        }
> > 
> > Wait, why are you checking the state instead of setting it?
> 
> Oops, apparently I screwed up my workspaces, as I do remember coding this
> assignment. And I must have used the wrong command when building the tree, so
> I didn't even catch the compile error. :(
> 
> > 
> > How did you test this series?
> 
> So far only smoke testing and qemu-iotests, because I don't have a good idea of
> how to verify the transaction's atomicity. Any suggestions?

Perhaps you could use blkdebug to delay something in the middle of the
transaction while your guest keeps writing stuff? That should result in
100% reproducibility.

I guess you actually need to make sure that your guest doesn't do any
I/O, then set the blkdebug breakpoint, send the transaction, and once a
request is stopped, you start some I/O in the guest. Resume as soon as
you know that something bad happened.

Possibly you need to add a new blkdebug event to find a good place to
suspend a transaction request.

Kevin
Fam Zheng Oct. 12, 2015, 11:20 a.m. UTC | #3
On Mon, 10/12 10:31, Kevin Wolf wrote:
> Am 09.10.2015 um 18:27 hat Fam Zheng geschrieben:
> > On Fri, 10/09 16:31, Kevin Wolf wrote:
> > > Am 09.10.2015 um 07:45 hat Fam Zheng geschrieben:
> > > > Signed-off-by: Fam Zheng <famz@redhat.com>
> > > > ---
> > > >  aio-posix.c         |  3 ++-
> > > >  aio-win32.c         |  3 ++-
> > > >  async.c             | 42 ++++++++++++++++++++++++++++++++++++++++++
> > > >  include/block/aio.h | 30 ++++++++++++++++++++++++++++++
> > > >  4 files changed, 76 insertions(+), 2 deletions(-)
> > > > 
> > > > diff --git a/aio-posix.c b/aio-posix.c
> > > > index d25fcfc..a261892 100644
> > > > --- a/aio-posix.c
> > > > +++ b/aio-posix.c
> > > > @@ -261,7 +261,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
> > > >  
> > > >      /* fill pollfds */
> > > >      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> > > > -        if (!node->deleted && node->pfd.events) {
> > > > +        if (!node->deleted && node->pfd.events
> > > > +            && !aio_type_disabled(ctx, node->type)) {
> > > >              add_pollfd(node);
> > > >          }
> > > >      }
> > > > diff --git a/aio-win32.c b/aio-win32.c
> > > > index f5ecf57..66cff60 100644
> > > > --- a/aio-win32.c
> > > > +++ b/aio-win32.c
> > > > @@ -309,7 +309,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
> > > >      /* fill fd sets */
> > > >      count = 0;
> > > >      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> > > > -        if (!node->deleted && node->io_notify) {
> > > > +        if (!node->deleted && node->io_notify
> > > > +            && !aio_type_disabled(ctx, node->type)) {
> > > >              events[count++] = event_notifier_get_handle(node->e);
> > > >          }
> > > >      }
> > > > diff --git a/async.c b/async.c
> > > > index 244bf79..855b9d5 100644
> > > > --- a/async.c
> > > > +++ b/async.c
> > > > @@ -361,3 +361,45 @@ void aio_context_release(AioContext *ctx)
> > > >  {
> > > >      rfifolock_unlock(&ctx->lock);
> > > >  }
> > > > +
> > > > +bool aio_type_disabled(AioContext *ctx, int type)
> > > > +{
> > > > +    int i = 1;
> > > > +    int n = 0;
> > > > +
> > > > +    while (type) {
> > > > +        bool b = type & 0x1;
> > > > +        type >>= 1;
> > > > +        n++;
> > > 
> > > Any specific reason for leaving client_disable_counters[0] unused?
> > 
> > No, I should have started from 0.
> > 
> > > 
> > > > +        i <<= 1;
> > > 
> > > i is never read.
> > > 
> > > > +        if (!b) {
> > > > +            continue;
> > > > +        }
> > > > +        if (ctx->client_disable_counters[n]) {
> > > > +            return true;
> > > > +        }
> > > > +    }
> > > > +    return false;
> > > > +}
> > > 
> > > In general I wonder whether this function really needs to take a mask
> > > with possibly multiple set bits instead of just a single type.
> > 
> > Previous versions used to have more types than "internal" and "external", so it
> > has been a mask. So yes, I think a single type will be better now.
> > 
> > > 
> > > > +void aio_disable_enable_clients(AioContext *ctx, int clients_mask,
> > > > +                                bool is_disable)
> > > > +{
> > > > +    int i = 1;
> > > > +    int n = 0;
> > > > +    aio_context_acquire(ctx);
> > > > +
> > > > +    while (clients_mask) {
> > > > +        bool b = clients_mask & 0x1;
> > > > +        clients_mask >>= 1;
> > > > +        n++;
> > > > +        i <<= 1;
> > > 
> > > This i isn't used either.
> > > 
> > > > +        if (!b) {
> > > > +            continue;
> > > > +        }
> > > > +        if (ctx->client_disable_counters[n]) {
> > > > +            return true;
> > > > +        }
> > > 
> > > Wait, why are you checking the state instead of setting it?
> > 
> > Oops, apparently I screwed up my workspaces, as I do remember coding this
> > assignment. And I must have used the wrong command when building the tree, so
> > I didn't even catch the compile error. :(
> > 
> > > 
> > > How did you test this series?
> > 
> > So far only smoke testing and qemu-iotests, because I don't have a good idea of
> > how to verify the transaction's atomicity. Any suggestions?
> 
> Perhaps you could use blkdebug to delay something in the middle of the
> transaction while your guest keeps writing stuff? That should result in
> 100% reproducibility.
> 
> I guess you actually need to make sure that your guest doesn't do any
> I/O, then set the blkdebug breakpoint, send the transaction, and once a
> request is stopped, you start some I/O in the guest. Resume as soon as
> you know that something bad happened.
> 
> Possibly you need to add a new blkdebug event to find a good place to
> suspend a transaction request.
> 

It's difficult to "start some I/O" in the guest in the middle of a
transaction, even with the help of blkdebug, because the BQL is held during
the whole transaction.

I think it would be a bit easier to program a VCPU to constantly submit I/O
requests to the vq, but that's far from enough.

Anyway I'll start by writing some unit test code instead, in tests/test-aio.c.
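
Roughly along these lines (sketch only: AIO_CLIENT_EXTERNAL stands in for
whichever client bit the series ends up defining, it assumes the counter fixes
discussed above, and it would be registered with g_test_add_func() like the
other cases in that file):

static void test_disable_enable_clients(void)
{
    AioContext *ctx = aio_context_new(&error_abort);

    g_assert(!aio_type_disabled(ctx, AIO_CLIENT_EXTERNAL));

    /* nested disable: stays disabled until the matching enable */
    aio_disable_clients(ctx, AIO_CLIENT_EXTERNAL);
    aio_disable_clients(ctx, AIO_CLIENT_EXTERNAL);
    g_assert(aio_type_disabled(ctx, AIO_CLIENT_EXTERNAL));

    aio_enable_clients(ctx, AIO_CLIENT_EXTERNAL);
    g_assert(aio_type_disabled(ctx, AIO_CLIENT_EXTERNAL));
    aio_enable_clients(ctx, AIO_CLIENT_EXTERNAL);
    g_assert(!aio_type_disabled(ctx, AIO_CLIENT_EXTERNAL));

    aio_context_unref(ctx);
}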

Fam

Patch

diff --git a/aio-posix.c b/aio-posix.c
index d25fcfc..a261892 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -261,7 +261,8 @@  bool aio_poll(AioContext *ctx, bool blocking)
 
     /* fill pollfds */
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (!node->deleted && node->pfd.events) {
+        if (!node->deleted && node->pfd.events
+            && !aio_type_disabled(ctx, node->type)) {
             add_pollfd(node);
         }
     }
diff --git a/aio-win32.c b/aio-win32.c
index f5ecf57..66cff60 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -309,7 +309,8 @@  bool aio_poll(AioContext *ctx, bool blocking)
     /* fill fd sets */
     count = 0;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (!node->deleted && node->io_notify) {
+        if (!node->deleted && node->io_notify
+            && !aio_type_disabled(ctx, node->type)) {
             events[count++] = event_notifier_get_handle(node->e);
         }
     }
diff --git a/async.c b/async.c
index 244bf79..855b9d5 100644
--- a/async.c
+++ b/async.c
@@ -361,3 +361,45 @@  void aio_context_release(AioContext *ctx)
 {
     rfifolock_unlock(&ctx->lock);
 }
+
+bool aio_type_disabled(AioContext *ctx, int type)
+{
+    int i = 1;
+    int n = 0;
+
+    while (type) {
+        bool b = type & 0x1;
+        type >>= 1;
+        n++;
+        i <<= 1;
+        if (!b) {
+            continue;
+        }
+        if (ctx->client_disable_counters[n]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void aio_disable_enable_clients(AioContext *ctx, int clients_mask,
+                                bool is_disable)
+{
+    int i = 1;
+    int n = 0;
+    aio_context_acquire(ctx);
+
+    while (clients_mask) {
+        bool b = clients_mask & 0x1;
+        clients_mask >>= 1;
+        n++;
+        i <<= 1;
+        if (!b) {
+            continue;
+        }
+        if (ctx->client_disable_counters[n]) {
+            return true;
+        }
+    }
+    aio_context_release(ctx);
+}
diff --git a/include/block/aio.h b/include/block/aio.h
index 60e796b..b687152 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -122,6 +122,8 @@  struct AioContext {
 
     /* TimerLists for calling timers - one per clock type */
     QEMUTimerListGroup tlg;
+
+    int client_disable_counters[sizeof(int)];
 };
 
 /**
@@ -379,4 +381,32 @@  static inline void aio_timer_init(AioContext *ctx,
  */
 int64_t aio_compute_timeout(AioContext *ctx);
 
+void aio_disable_enable_clients(AioContext *ctx, int clients_mask,
+                                bool is_disable);
+/**
+ * aio_disable_clients:
+ * @ctx: the aio context
+ *
+ * Disable the further processing by aio_poll(ctx) of clients. This function is
+ * thread safe as it acquires/releases AioContext.
+ */
+static inline void aio_disable_clients(AioContext *ctx, int clients_mask)
+{
+    aio_disable_enable_clients(ctx, clients_mask, true);
+}
+
+/**
+ * aio_enable_clients:
+ * @ctx: the aio context
+ *
+ * Enable the processing of the clients. This function is thread safe as it
+ * acquires/releases AioContext.
+ */
+static inline void aio_enable_clients(AioContext *ctx, int clients_mask)
+{
+    aio_disable_enable_clients(ctx, clients_mask, false);
+}
+
+bool aio_type_disabled(AioContext *ctx, int type);
+
 #endif