Patchwork [v1,2/3] : Helper routines to use GLib threadpool infrastructure in 9pfs.

login
register
mail settings
Submitter Arun Bharadwaj
Date March 15, 2011, 10:38 a.m.
Message ID <20110315103803.GC23922@linux.vnet.ibm.com>
Download mbox | patch
Permalink /patch/86937/
State New
Headers show

Comments

Arun Bharadwaj - March 15, 2011, 10:38 a.m.
* Arun R Bharadwaj <arun@linux.vnet.ibm.com> [2011-03-15 16:04:53]:

Author: Arun R Bharadwaj <arun@linux.vnet.ibm.com>
Date:   Thu Mar 10 15:11:49 2011 +0530

    Helper routines to use GLib threadpool infrastructure in 9pfs.
    
    This patch creates helper routines to make use of the
    threadpool infrastructure provided by GLib. This is based
    on the prototype patch by Anthony which does a similar thing
    for posix-aio-compat.c
    
    An example use case is provided in the next patch where one
    of the syscalls in 9pfs is converted into the threaded model
    using these helper routines.
    
    Signed-off-by: Arun R Bharadwaj <arun@linux.vnet.ibm.com>
    Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Harsh Prateek Bora - March 15, 2011, 11:45 a.m.
On 03/15/2011 04:08 PM, Arun R Bharadwaj wrote:
> * Arun R Bharadwaj<arun@linux.vnet.ibm.com>  [2011-03-15 16:04:53]:
>
> Author: Arun R Bharadwaj<arun@linux.vnet.ibm.com>
> Date:   Thu Mar 10 15:11:49 2011 +0530
>
>      Helper routines to use GLib threadpool infrastructure in 9pfs.
>
>      This patch creates helper routines to make use of the
>      threadpool infrastructure provided by GLib. This is based
>      on the prototype patch by Anthony which does a similar thing
>      for posix-aio-compat.c
>
>      An example use case is provided in the next patch where one
>      of the syscalls in 9pfs is converted into the threaded model
>      using these helper routines.
>
>      Signed-off-by: Arun R Bharadwaj<arun@linux.vnet.ibm.com>
>      Reviewed-by: Aneesh Kumar K.V<aneesh.kumar@linux.vnet.ibm.com>
>
> diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
> index dceefd5..cf61345 100644
> --- a/hw/9pfs/virtio-9p.c
> +++ b/hw/9pfs/virtio-9p.c
> @@ -18,6 +18,8 @@
>   #include "fsdev/qemu-fsdev.h"
>   #include "virtio-9p-debug.h"
>   #include "virtio-9p-xattr.h"
> +#include "signal.h"
> +#include "qemu-thread.h"
>
>   int debug_9p_pdu;
>   static void v9fs_reclaim_fd(V9fsState *s);
> @@ -36,6 +38,89 @@ enum {
>       Oappend = 0x80,
>   };
>
> +typedef struct V9fsPool {
> +    GThreadPool *pool;
> +    GList *requests;
> +    int rfd;
> +    int wfd;
> +} V9fsPool;
> +
> +static V9fsPool v9fs_pool;
> +
> +static void v9fs_qemu_submit_request(V9fsRequest *req)
> +{
> +    V9fsPool *p = &v9fs_pool;
> +
> +    p->requests = g_list_append(p->requests, req);
> +    g_thread_pool_push(v9fs_pool.pool, req, NULL);
> +}
> +
> +static void die2(int err, const char *what)
> +{
> +    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
> +    abort();
> +}
> +
> +static void die(const char *what)
> +{
> +    die2(errno, what);
> +}
> +
> +static void v9fs_qemu_process_post_ops(void *arg)
> +{
> +    struct V9fsPool *p = &v9fs_pool;
> +    struct V9fsPostOp *post_op;
> +    char byte;
> +    ssize_t len;
> +    GList *cur_req, *next_req;
> +
> +    do {
> +        len = read(p->rfd, &byte, sizeof(byte));
> +    } while (len == -1 && errno == EINTR);
> +
> +    for (cur_req = p->requests; cur_req != NULL; cur_req = next_req) {
> +        V9fsRequest *req = cur_req->data;
> +        next_req = g_list_next(cur_req);
> +
> +        if (!req->done) {
> +            continue;
> +        }
> +
> +        post_op = &req->post_op;
> +        post_op->func(post_op->arg);
> +        p->requests = g_list_remove_link(p->requests, cur_req);
> +        g_list_free(p->requests);
> +    }
> +}
> +
> +static inline void v9fs_thread_signal(void)
> +{
> +    struct V9fsPool *p = &v9fs_pool;
> +    char byte = 0;
> +    ssize_t ret;
> +
> +    do {
> +        ret = write(p->wfd, &byte, sizeof(byte));
> +    } while (ret == -1 && errno == EINTR);
> +
> +    if (ret < 0 && errno != EAGAIN) {
> +        die("write() in v9fs");
> +    }
> +
> +    if (kill(getpid(), SIGUSR2)) {

Not sure whether using pthread_kill or qemu_thread_signal is better to 
go with?

> +        die("kill failed");
> +    }
> +}
> +
> +static void v9fs_thread_routine(gpointer data, gpointer user_data)
> +{
> +    V9fsRequest *req = data;
> +
> +    req->func(req);
> +    v9fs_thread_signal();
> +    req->done = 1;

Shouldn't it be in reverse order, setting flag first and then signal:
     req->done = 1;
     v9fs_thread_signal();

> +}
> +
>   static int omode_to_uflags(int8_t mode)
>   {
>       int ret = 0;
> @@ -3850,7 +3935,8 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
>       int i, len;
>       struct stat stat;
>       FsTypeEntry *fse;
> -
> +    int fds[2];
> +    V9fsPool *p =&v9fs_pool;
>
>       s = (V9fsState *)virtio_common_init("virtio-9p",
>                                       VIRTIO_ID_9P,
> @@ -3939,5 +4025,21 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
>                           s->tag_len;
>       s->vdev.get_config = virtio_9p_get_config;
>
> +    if (qemu_pipe(fds) == -1) {
> +        fprintf(stderr, "failed to create fd's for virtio-9p\n");
> +        exit(1);
> +    }
> +
> +    p->pool = g_thread_pool_new(v9fs_thread_routine, p, 8, FALSE, NULL);
> +    p->rfd = fds[0];
> +    p->wfd = fds[1];
> +
> +    fcntl(p->rfd, F_SETFL, O_NONBLOCK);
> +    fcntl(p->wfd, F_SETFL, O_NONBLOCK);
> +
> +    qemu_set_fd_handler(p->rfd, v9fs_qemu_process_post_ops, NULL, NULL);
> +
> +    (void) v9fs_qemu_submit_request;

Do we really need it ^ ?

- Harsh

> +
>       return&s->vdev;
>   }
> diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
> index 10809ba..e7d2326 100644
> --- a/hw/9pfs/virtio-9p.h
> +++ b/hw/9pfs/virtio-9p.h
> @@ -124,6 +124,20 @@ struct V9fsPDU
>       QLIST_ENTRY(V9fsPDU) next;
>   };
>
> +typedef struct V9fsPostOp {
> +    /* Post Operation routine to execute after executing syscall */
> +    void (*func)(void *arg);
> +    void *arg;
> +} V9fsPostOp;
> +
> +typedef struct V9fsRequest {
> +    void (*func)(struct V9fsRequest *req);
> +
> +    /* Flag to indicate that request is satisfied, ready for post-processing */
> +    int done;
> +
> +    V9fsPostOp post_op;
> +} V9fsRequest;
>
>   /* FIXME
>    * 1) change user needs to set groups and stuff
>
>
Anthony Liguori - March 15, 2011, 1:13 p.m.
On 03/15/2011 05:38 AM, Arun R Bharadwaj wrote:
> * Arun R Bharadwaj<arun@linux.vnet.ibm.com>  [2011-03-15 16:04:53]:
>
> Author: Arun R Bharadwaj<arun@linux.vnet.ibm.com>
> Date:   Thu Mar 10 15:11:49 2011 +0530
>
>      Helper routines to use GLib threadpool infrastructure in 9pfs.
>
>      This patch creates helper routines to make use of the
>      threadpool infrastructure provided by GLib. This is based
>      on the prototype patch by Anthony which does a similar thing
>      for posix-aio-compat.c
>
>      An example use case is provided in the next patch where one
>      of the syscalls in 9pfs is converted into the threaded model
>      using these helper routines.
>
>      Signed-off-by: Arun R Bharadwaj<arun@linux.vnet.ibm.com>
>      Reviewed-by: Aneesh Kumar K.V<aneesh.kumar@linux.vnet.ibm.com>

Why even bothering signaling for completion with the virtio-9p threadpool?

There's no sane guest that's going to poll on virtio-9p completion with 
interrupts disabled and no timer.  Once we enable the I/O thread by 
default, it won't even be necessary for the paio layer.

Regards,

Anthony Liguori

> diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
> index dceefd5..cf61345 100644
> --- a/hw/9pfs/virtio-9p.c
> +++ b/hw/9pfs/virtio-9p.c
> @@ -18,6 +18,8 @@
>   #include "fsdev/qemu-fsdev.h"
>   #include "virtio-9p-debug.h"
>   #include "virtio-9p-xattr.h"
> +#include "signal.h"
> +#include "qemu-thread.h"
>
>   int debug_9p_pdu;
>   static void v9fs_reclaim_fd(V9fsState *s);
> @@ -36,6 +38,89 @@ enum {
>       Oappend = 0x80,
>   };
>
> +typedef struct V9fsPool {
> +    GThreadPool *pool;
> +    GList *requests;
> +    int rfd;
> +    int wfd;
> +} V9fsPool;
> +
> +static V9fsPool v9fs_pool;
> +
> +static void v9fs_qemu_submit_request(V9fsRequest *req)
> +{
> +    V9fsPool *p =&v9fs_pool;
> +
> +    p->requests = g_list_append(p->requests, req);
> +    g_thread_pool_push(v9fs_pool.pool, req, NULL);
> +}
> +
> +static void die2(int err, const char *what)
> +{
> +    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
> +    abort();
> +}
> +
> +static void die(const char *what)
> +{
> +    die2(errno, what);
> +}
> +
> +static void v9fs_qemu_process_post_ops(void *arg)
> +{
> +    struct V9fsPool *p =&v9fs_pool;
> +    struct V9fsPostOp *post_op;
> +    char byte;
> +    ssize_t len;
> +    GList *cur_req, *next_req;
> +
> +    do {
> +        len = read(p->rfd,&byte, sizeof(byte));
> +    } while (len == -1&&  errno == EINTR);
> +
> +    for (cur_req = p->requests; cur_req != NULL; cur_req = next_req) {
> +        V9fsRequest *req = cur_req->data;
> +        next_req = g_list_next(cur_req);
> +
> +        if (!req->done) {
> +            continue;
> +        }
> +
> +        post_op =&req->post_op;
> +        post_op->func(post_op->arg);
> +        p->requests = g_list_remove_link(p->requests, cur_req);
> +        g_list_free(p->requests);
> +    }
> +}
> +
> +static inline void v9fs_thread_signal(void)
> +{
> +    struct V9fsPool *p =&v9fs_pool;
> +    char byte = 0;
> +    ssize_t ret;
> +
> +    do {
> +        ret = write(p->wfd,&byte, sizeof(byte));
> +    } while (ret == -1&&  errno == EINTR);
> +
> +    if (ret<  0&&  errno != EAGAIN) {
> +        die("write() in v9fs");
> +    }
> +
> +    if (kill(getpid(), SIGUSR2)) {
> +        die("kill failed");
> +    }
> +}
> +
> +static void v9fs_thread_routine(gpointer data, gpointer user_data)
> +{
> +    V9fsRequest *req = data;
> +
> +    req->func(req);
> +    v9fs_thread_signal();
> +    req->done = 1;
> +}
> +
>   static int omode_to_uflags(int8_t mode)
>   {
>       int ret = 0;
> @@ -3850,7 +3935,8 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
>       int i, len;
>       struct stat stat;
>       FsTypeEntry *fse;
> -
> +    int fds[2];
> +    V9fsPool *p =&v9fs_pool;
>
>       s = (V9fsState *)virtio_common_init("virtio-9p",
>                                       VIRTIO_ID_9P,
> @@ -3939,5 +4025,21 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
>                           s->tag_len;
>       s->vdev.get_config = virtio_9p_get_config;
>
> +    if (qemu_pipe(fds) == -1) {
> +        fprintf(stderr, "failed to create fd's for virtio-9p\n");
> +        exit(1);
> +    }
> +
> +    p->pool = g_thread_pool_new(v9fs_thread_routine, p, 8, FALSE, NULL);
> +    p->rfd = fds[0];
> +    p->wfd = fds[1];
> +
> +    fcntl(p->rfd, F_SETFL, O_NONBLOCK);
> +    fcntl(p->wfd, F_SETFL, O_NONBLOCK);
> +
> +    qemu_set_fd_handler(p->rfd, v9fs_qemu_process_post_ops, NULL, NULL);
> +
> +    (void) v9fs_qemu_submit_request;
> +
>       return&s->vdev;
>   }
> diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
> index 10809ba..e7d2326 100644
> --- a/hw/9pfs/virtio-9p.h
> +++ b/hw/9pfs/virtio-9p.h
> @@ -124,6 +124,20 @@ struct V9fsPDU
>       QLIST_ENTRY(V9fsPDU) next;
>   };
>
> +typedef struct V9fsPostOp {
> +    /* Post Operation routine to execute after executing syscall */
> +    void (*func)(void *arg);
> +    void *arg;
> +} V9fsPostOp;
> +
> +typedef struct V9fsRequest {
> +    void (*func)(struct V9fsRequest *req);
> +
> +    /* Flag to indicate that request is satisfied, ready for post-processing */
> +    int done;
> +
> +    V9fsPostOp post_op;
> +} V9fsRequest;
>
>   /* FIXME
>    * 1) change user needs to set groups and stuff
Arun Bharadwaj - March 15, 2011, 3:30 p.m.
* Harsh Bora <harsh@linux.vnet.ibm.com> [2011-03-15 17:15:48]:

> On 03/15/2011 04:08 PM, Arun R Bharadwaj wrote:
> >* Arun R Bharadwaj<arun@linux.vnet.ibm.com>  [2011-03-15 16:04:53]:
> >
> >Author: Arun R Bharadwaj<arun@linux.vnet.ibm.com>
> >Date:   Thu Mar 10 15:11:49 2011 +0530
> >
> >     Helper routines to use GLib threadpool infrastructure in 9pfs.
> >
> >     This patch creates helper routines to make use of the
> >     threadpool infrastructure provided by GLib. This is based
> >     on the prototype patch by Anthony which does a similar thing
> >     for posix-aio-compat.c
> >
> >     An example use case is provided in the next patch where one
> >     of the syscalls in 9pfs is converted into the threaded model
> >     using these helper routines.
> >
> >     Signed-off-by: Arun R Bharadwaj<arun@linux.vnet.ibm.com>
> >     Reviewed-by: Aneesh Kumar K.V<aneesh.kumar@linux.vnet.ibm.com>
> >
> >diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
> >index dceefd5..cf61345 100644
> >--- a/hw/9pfs/virtio-9p.c
> >+++ b/hw/9pfs/virtio-9p.c
> >@@ -18,6 +18,8 @@
> >  #include "fsdev/qemu-fsdev.h"
> >  #include "virtio-9p-debug.h"
> >  #include "virtio-9p-xattr.h"
> >+#include "signal.h"
> >+#include "qemu-thread.h"
> >
> >  int debug_9p_pdu;
> >  static void v9fs_reclaim_fd(V9fsState *s);
> >@@ -36,6 +38,89 @@ enum {
> >      Oappend = 0x80,
> >  };
> >
> >+typedef struct V9fsPool {
> >+    GThreadPool *pool;
> >+    GList *requests;
> >+    int rfd;
> >+    int wfd;
> >+} V9fsPool;
> >+
> >+static V9fsPool v9fs_pool;
> >+
> >+static void v9fs_qemu_submit_request(V9fsRequest *req)
> >+{
> >+    V9fsPool *p =&v9fs_pool;
> >+
> >+    p->requests = g_list_append(p->requests, req);
> >+    g_thread_pool_push(v9fs_pool.pool, req, NULL);
> >+}
> >+
> >+static void die2(int err, const char *what)
> >+{
> >+    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
> >+    abort();
> >+}
> >+
> >+static void die(const char *what)
> >+{
> >+    die2(errno, what);
> >+}
> >+
> >+static void v9fs_qemu_process_post_ops(void *arg)
> >+{
> >+    struct V9fsPool *p =&v9fs_pool;
> >+    struct V9fsPostOp *post_op;
> >+    char byte;
> >+    ssize_t len;
> >+    GList *cur_req, *next_req;
> >+
> >+    do {
> >+        len = read(p->rfd,&byte, sizeof(byte));
> >+    } while (len == -1&&  errno == EINTR);
> >+
> >+    for (cur_req = p->requests; cur_req != NULL; cur_req = next_req) {
> >+        V9fsRequest *req = cur_req->data;
> >+        next_req = g_list_next(cur_req);
> >+
> >+        if (!req->done) {
> >+            continue;
> >+        }
> >+
> >+        post_op =&req->post_op;
> >+        post_op->func(post_op->arg);
> >+        p->requests = g_list_remove_link(p->requests, cur_req);
> >+        g_list_free(p->requests);
> >+    }
> >+}
> >+
> >+static inline void v9fs_thread_signal(void)
> >+{
> >+    struct V9fsPool *p =&v9fs_pool;
> >+    char byte = 0;
> >+    ssize_t ret;
> >+
> >+    do {
> >+        ret = write(p->wfd,&byte, sizeof(byte));
> >+    } while (ret == -1&&  errno == EINTR);
> >+
> >+    if (ret<  0&&  errno != EAGAIN) {
> >+        die("write() in v9fs");
> >+    }
> >+
> >+    if (kill(getpid(), SIGUSR2)) {
> 
> Not sure whether using pthread_kill or qemu_thread_signal is better
> to go with?
> 

Please check the other replies. Looks like we don't really need
signalling.

> >+        die("kill failed");
> >+    }
> >+}
> >+
> >+static void v9fs_thread_routine(gpointer data, gpointer user_data)
> >+{
> >+    V9fsRequest *req = data;
> >+
> >+    req->func(req);
> >+    v9fs_thread_signal();
> >+    req->done = 1;
> 
> Shouldn't it be in reverse order, setting flag first and then signal:
>     req->done = 1;
>     v9fs_thread_signal();
> 
> >+}
> >+
> >  static int omode_to_uflags(int8_t mode)
> >  {
> >      int ret = 0;
> >@@ -3850,7 +3935,8 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
> >      int i, len;
> >      struct stat stat;
> >      FsTypeEntry *fse;
> >-
> >+    int fds[2];
> >+    V9fsPool *p =&v9fs_pool;
> >
> >      s = (V9fsState *)virtio_common_init("virtio-9p",
> >                                      VIRTIO_ID_9P,
> >@@ -3939,5 +4025,21 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
> >                          s->tag_len;
> >      s->vdev.get_config = virtio_9p_get_config;
> >
> >+    if (qemu_pipe(fds) == -1) {
> >+        fprintf(stderr, "failed to create fd's for virtio-9p\n");
> >+        exit(1);
> >+    }
> >+
> >+    p->pool = g_thread_pool_new(v9fs_thread_routine, p, 8, FALSE, NULL);
> >+    p->rfd = fds[0];
> >+    p->wfd = fds[1];
> >+
> >+    fcntl(p->rfd, F_SETFL, O_NONBLOCK);
> >+    fcntl(p->wfd, F_SETFL, O_NONBLOCK);
> >+
> >+    qemu_set_fd_handler(p->rfd, v9fs_qemu_process_post_ops, NULL, NULL);
> >+
> >+    (void) v9fs_qemu_submit_request;
> 
> Do we really need it ^ ?
> 

Yeah. Because till we make use of v9fs_qemu_submit_request() in the
next patch, we will get a "v9fs_qemu_submit_request defined but not
used" compile error.

> - Harsh
> 
> >+
> >      return&s->vdev;
> >  }
> >diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
> >index 10809ba..e7d2326 100644
> >--- a/hw/9pfs/virtio-9p.h
> >+++ b/hw/9pfs/virtio-9p.h
> >@@ -124,6 +124,20 @@ struct V9fsPDU
> >      QLIST_ENTRY(V9fsPDU) next;
> >  };
> >
> >+typedef struct V9fsPostOp {
> >+    /* Post Operation routine to execute after executing syscall */
> >+    void (*func)(void *arg);
> >+    void *arg;
> >+} V9fsPostOp;
> >+
> >+typedef struct V9fsRequest {
> >+    void (*func)(struct V9fsRequest *req);
> >+
> >+    /* Flag to indicate that request is satisfied, ready for post-processing */
> >+    int done;
> >+
> >+    V9fsPostOp post_op;
> >+} V9fsRequest;
> >
> >  /* FIXME
> >   * 1) change user needs to set groups and stuff
> >
> >
>
Arun Bharadwaj - March 15, 2011, 3:33 p.m.
* Anthony Liguori <aliguori@us.ibm.com> [2011-03-15 08:13:19]:

> On 03/15/2011 05:38 AM, Arun R Bharadwaj wrote:
> >* Arun R Bharadwaj<arun@linux.vnet.ibm.com>  [2011-03-15 16:04:53]:
> >
> >Author: Arun R Bharadwaj<arun@linux.vnet.ibm.com>
> >Date:   Thu Mar 10 15:11:49 2011 +0530
> >
> >     Helper routines to use GLib threadpool infrastructure in 9pfs.
> >
> >     This patch creates helper routines to make use of the
> >     threadpool infrastructure provided by GLib. This is based
> >     on the prototype patch by Anthony which does a similar thing
> >     for posix-aio-compat.c
> >
> >     An example use case is provided in the next patch where one
> >     of the syscalls in 9pfs is converted into the threaded model
> >     using these helper routines.
> >
> >     Signed-off-by: Arun R Bharadwaj<arun@linux.vnet.ibm.com>
> >     Reviewed-by: Aneesh Kumar K.V<aneesh.kumar@linux.vnet.ibm.com>
> 
> Why even bothering signaling for completion with the virtio-9p threadpool?
> 
> There's no sane guest that's going to poll on virtio-9p completion
> with interrupts disabled and no timer.  Once we enable the I/O
> thread by default, it won't even be necessary for the paio layer.
> 
> Regards,
> 
> Anthony Liguori
> 

Thanks for the information. I will remove the signalling part,
test the code again and re-post.

> >diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
> >index dceefd5..cf61345 100644
> >--- a/hw/9pfs/virtio-9p.c
> >+++ b/hw/9pfs/virtio-9p.c
> >@@ -18,6 +18,8 @@
> >  #include "fsdev/qemu-fsdev.h"
> >  #include "virtio-9p-debug.h"
> >  #include "virtio-9p-xattr.h"
> >+#include "signal.h"
> >+#include "qemu-thread.h"
> >
> >  int debug_9p_pdu;
> >  static void v9fs_reclaim_fd(V9fsState *s);
> >@@ -36,6 +38,89 @@ enum {
> >      Oappend = 0x80,
> >  };
> >
> >+typedef struct V9fsPool {
> >+    GThreadPool *pool;
> >+    GList *requests;
> >+    int rfd;
> >+    int wfd;
> >+} V9fsPool;
> >+
> >+static V9fsPool v9fs_pool;
> >+
> >+static void v9fs_qemu_submit_request(V9fsRequest *req)
> >+{
> >+    V9fsPool *p =&v9fs_pool;
> >+
> >+    p->requests = g_list_append(p->requests, req);
> >+    g_thread_pool_push(v9fs_pool.pool, req, NULL);
> >+}
> >+
> >+static void die2(int err, const char *what)
> >+{
> >+    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
> >+    abort();
> >+}
> >+
> >+static void die(const char *what)
> >+{
> >+    die2(errno, what);
> >+}
> >+
> >+static void v9fs_qemu_process_post_ops(void *arg)
> >+{
> >+    struct V9fsPool *p =&v9fs_pool;
> >+    struct V9fsPostOp *post_op;
> >+    char byte;
> >+    ssize_t len;
> >+    GList *cur_req, *next_req;
> >+
> >+    do {
> >+        len = read(p->rfd,&byte, sizeof(byte));
> >+    } while (len == -1&&  errno == EINTR);
> >+
> >+    for (cur_req = p->requests; cur_req != NULL; cur_req = next_req) {
> >+        V9fsRequest *req = cur_req->data;
> >+        next_req = g_list_next(cur_req);
> >+
> >+        if (!req->done) {
> >+            continue;
> >+        }
> >+
> >+        post_op =&req->post_op;
> >+        post_op->func(post_op->arg);
> >+        p->requests = g_list_remove_link(p->requests, cur_req);
> >+        g_list_free(p->requests);
> >+    }
> >+}
> >+
> >+static inline void v9fs_thread_signal(void)
> >+{
> >+    struct V9fsPool *p =&v9fs_pool;
> >+    char byte = 0;
> >+    ssize_t ret;
> >+
> >+    do {
> >+        ret = write(p->wfd,&byte, sizeof(byte));
> >+    } while (ret == -1&&  errno == EINTR);
> >+
> >+    if (ret<  0&&  errno != EAGAIN) {
> >+        die("write() in v9fs");
> >+    }
> >+
> >+    if (kill(getpid(), SIGUSR2)) {
> >+        die("kill failed");
> >+    }
> >+}
> >+
> >+static void v9fs_thread_routine(gpointer data, gpointer user_data)
> >+{
> >+    V9fsRequest *req = data;
> >+
> >+    req->func(req);
> >+    v9fs_thread_signal();
> >+    req->done = 1;
> >+}
> >+
> >  static int omode_to_uflags(int8_t mode)
> >  {
> >      int ret = 0;
> >@@ -3850,7 +3935,8 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
> >      int i, len;
> >      struct stat stat;
> >      FsTypeEntry *fse;
> >-
> >+    int fds[2];
> >+    V9fsPool *p =&v9fs_pool;
> >
> >      s = (V9fsState *)virtio_common_init("virtio-9p",
> >                                      VIRTIO_ID_9P,
> >@@ -3939,5 +4025,21 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
> >                          s->tag_len;
> >      s->vdev.get_config = virtio_9p_get_config;
> >
> >+    if (qemu_pipe(fds) == -1) {
> >+        fprintf(stderr, "failed to create fd's for virtio-9p\n");
> >+        exit(1);
> >+    }
> >+
> >+    p->pool = g_thread_pool_new(v9fs_thread_routine, p, 8, FALSE, NULL);
> >+    p->rfd = fds[0];
> >+    p->wfd = fds[1];
> >+
> >+    fcntl(p->rfd, F_SETFL, O_NONBLOCK);
> >+    fcntl(p->wfd, F_SETFL, O_NONBLOCK);
> >+
> >+    qemu_set_fd_handler(p->rfd, v9fs_qemu_process_post_ops, NULL, NULL);
> >+
> >+    (void) v9fs_qemu_submit_request;
> >+
> >      return&s->vdev;
> >  }
> >diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
> >index 10809ba..e7d2326 100644
> >--- a/hw/9pfs/virtio-9p.h
> >+++ b/hw/9pfs/virtio-9p.h
> >@@ -124,6 +124,20 @@ struct V9fsPDU
> >      QLIST_ENTRY(V9fsPDU) next;
> >  };
> >
> >+typedef struct V9fsPostOp {
> >+    /* Post Operation routine to execute after executing syscall */
> >+    void (*func)(void *arg);
> >+    void *arg;
> >+} V9fsPostOp;
> >+
> >+typedef struct V9fsRequest {
> >+    void (*func)(struct V9fsRequest *req);
> >+
> >+    /* Flag to indicate that request is satisfied, ready for post-processing */
> >+    int done;
> >+
> >+    V9fsPostOp post_op;
> >+} V9fsRequest;
> >
> >  /* FIXME
> >   * 1) change user needs to set groups and stuff
>
Stefan Hajnoczi - March 16, 2011, 9:20 a.m.
On Tue, Mar 15, 2011 at 1:13 PM, Anthony Liguori <aliguori@us.ibm.com> wrote:
> Why even bothering signaling for completion with the virtio-9p threadpool?
>
> There's no sane guest that's going to poll on virtio-9p completion with
> interrupts disabled and no timer.  Once we enable the I/O thread by default,
> it won't even be necessary for the paio layer.

It's not just about preventing livelock under extreme cases.  If you
omit the signal then !CONFIG_IOTHREAD builds will see increased
latency because virtfs request completion will piggy-back on other
events that *do* interrupt the vcpu.  I'm no fan of !CONFIG_IOTHREAD
but skipping signals is currently bad practice and will continue to be
until CONFIG_IOTHREAD is removed entirely.

The proper solution would be a thin abstraction for thread-safe
notification that compiles out signals when CONFIG_IOTHREAD is used.
Then we have one place in QEMU that does signalling correctly and we
can optimize it or remove CONFIG_IOTHREAD completely without having
the burden of duplicating this code in several places.

Stefan
Anthony Liguori - March 16, 2011, 1:10 p.m.
On 03/16/2011 04:20 AM, Stefan Hajnoczi wrote:
> On Tue, Mar 15, 2011 at 1:13 PM, Anthony Liguori<aliguori@us.ibm.com>  wrote:
>> Why even bothering signaling for completion with the virtio-9p threadpool?
>>
>> There's no sane guest that's going to poll on virtio-9p completion with
>> interrupts disabled and no timer.  Once we enable the I/O thread by default,
>> it won't even be necessary for the paio layer.
> It's not just about preventing livelock under extreme cases.  If you
> omit the signal then !CONFIG_IOTHREAD builds will see increased
> latency because virtfs request completion will piggy-back on other
> events that *do* interrupt the vcpu.

But realistically, the timer is firing at a high enough frequency that I 
doubt you'd even observe the latency.

There's an easy solution here, just do some sniff testing to see if you 
can tell the difference.  I bet you can't.

>    I'm no fan of !CONFIG_IOTHREAD
> but skipping signals is currently bad practice and will continue to be
> until CONFIG_IOTHREAD is removed entirely.
>
> The proper solution would be a thin abstraction for thread-safe
> notification that compiles out signals when CONFIG_IOTHREAD is used.
> Then we have one place in QEMU that does signalling correctly and we
> can optimize it or remove CONFIG_IOTHREAD completely without having
> the burden of duplicating this code in several places.

We have probably 5 different ways to wake up a CPU.  I don't think we 
should add a new one just for this.

!CONFIG_IOTHREAD needs to go away in the very near future.  I'd rather 
focus on that.

Regards,

Anthony Liguori

> Stefan
jvrao - March 16, 2011, 2:20 p.m.
On 3/16/2011 6:10 AM, Anthony Liguori wrote:
> On 03/16/2011 04:20 AM, Stefan Hajnoczi wrote:
>> On Tue, Mar 15, 2011 at 1:13 PM, Anthony Liguori<aliguori@us.ibm.com>  wrote:
>>> Why even bothering signaling for completion with the virtio-9p threadpool?
>>>
>>> There's no sane guest that's going to poll on virtio-9p completion with
>>> interrupts disabled and no timer.  Once we enable the I/O thread by default,
>>> it won't even be necessary for the paio layer.
>> It's not just about preventing livelock under extreme cases.  If you
>> omit the signal then !CONFIG_IOTHREAD builds will see increased
>> latency because virtfs request completion will piggy-back on other
>> events that *do* interrupt the vcpu.
> 
> But realistically, the timer is firing at a high enough frequency that I doubt
> you'd even observe the latency.
> 
> There's an easy solution here, just do some sniff testing to see if you can tell
> the difference.  I bet you can't.
> 
>>    I'm no fan of !CONFIG_IOTHREAD
>> but skipping signals is currently bad practice and will continue to be
>> until CONFIG_IOTHREAD is removed entirely.
>>
>> The proper solution would be a thin abstraction for thread-safe
>> notification that compiles out signals when CONFIG_IOTHREAD is used.
>> Then we have one place in QEMU that does signalling correctly and we
>> can optimize it or remove CONFIG_IOTHREAD completely without having
>> the burden of duplicating this code in several places.
> 
> We have probably 5 different ways to wake up a CPU.  I don't think we should add
> a new one just for this.
> 
> !CONFIG_IOTHREAD needs to go away in the very near future.  I'd rather focus on
> that.

If that is the case, how about making VirtFS dependent on CONFIG_IOTHREAD
during the configuration? This can help even if !CONFIG_IOTHREAD lingers around
for some more time and would avoid any of Stefan's concerns.

- JV

> 
> Regards,
> 
> Anthony Liguori
> 
>> Stefan
>
Stefan Hajnoczi - March 16, 2011, 5:03 p.m.
On Wed, Mar 16, 2011 at 2:20 PM, Venkateswararao Jujjuri (JV)
<jvrao@linux.vnet.ibm.com> wrote:
> On 3/16/2011 6:10 AM, Anthony Liguori wrote:
>> On 03/16/2011 04:20 AM, Stefan Hajnoczi wrote:
>>> On Tue, Mar 15, 2011 at 1:13 PM, Anthony Liguori<aliguori@us.ibm.com>  wrote:
>>>> Why even bothering signaling for completion with the virtio-9p threadpool?
>>>>
>>>> There's no sane guest that's going to poll on virtio-9p completion with
>>>> interrupts disabled and no timer.  Once we enable the I/O thread by default,
>>>> it won't even be necessary for the paio layer.
>>> It's not just about preventing livelock under extreme cases.  If you
>>> omit the signal then !CONFIG_IOTHREAD builds will see increased
>>> latency because virtfs request completion will piggy-back on other
>>> events that *do* interrupt the vcpu.
>>
>> But realistically, the timer is firing at a high enough frequency that I doubt
>> you'd even observe the latency.
>>
>> There's an easy solution here, just do some sniff testing to see if you can tell
>> the difference.  I bet you can't.
>>
>>>    I'm no fan of !CONFIG_IOTHREAD
>>> but skipping signals is currently bad practice and will continue to be
>>> until CONFIG_IOTHREAD is removed entirely.
>>>
>>> The proper solution would be a thin abstraction for thread-safe
>>> notification that compiles out signals when CONFIG_IOTHREAD is used.
>>> Then we have one place in QEMU that does signalling correctly and we
>>> can optimize it or remove CONFIG_IOTHREAD completely without having
>>> the burden of duplicating this code in several places.
>>
>> We have probably 5 different ways to wake up a CPU.  I don't think we should add
>> a new one just for this.
>>
>> !CONFIG_IOTHREAD needs to go away in the very near future.  I'd rather focus on
>> that.
>
> If that is the case, how about making VirtFS dependent on CONFIG_IOTHREAD
> during the configuration? This can help even if !CONFIG_IOTHREAD lingers around
> for some more time and would avoid any of the Stefan's concerns.

Sounds fair.

Stefan

Patch

diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
index dceefd5..cf61345 100644
--- a/hw/9pfs/virtio-9p.c
+++ b/hw/9pfs/virtio-9p.c
@@ -18,6 +18,8 @@ 
 #include "fsdev/qemu-fsdev.h"
 #include "virtio-9p-debug.h"
 #include "virtio-9p-xattr.h"
+#include "signal.h"
+#include "qemu-thread.h"
 
 int debug_9p_pdu;
 static void v9fs_reclaim_fd(V9fsState *s);
@@ -36,6 +38,89 @@  enum {
     Oappend = 0x80,
 };
 
+typedef struct V9fsPool {
+    GThreadPool *pool;
+    GList *requests;
+    int rfd;
+    int wfd;
+} V9fsPool;
+
+static V9fsPool v9fs_pool;
+
+static void v9fs_qemu_submit_request(V9fsRequest *req)
+{
+    V9fsPool *p = &v9fs_pool;
+
+    p->requests = g_list_append(p->requests, req);
+    g_thread_pool_push(v9fs_pool.pool, req, NULL);
+}
+
+static void die2(int err, const char *what)
+{
+    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
+    abort();
+}
+
+static void die(const char *what)
+{
+    die2(errno, what);
+}
+
+static void v9fs_qemu_process_post_ops(void *arg)
+{
+    struct V9fsPool *p = &v9fs_pool;
+    struct V9fsPostOp *post_op;
+    char byte;
+    ssize_t len;
+    GList *cur_req, *next_req;
+
+    do {
+        len = read(p->rfd, &byte, sizeof(byte));
+    } while (len == -1 && errno == EINTR);
+
+    for (cur_req = p->requests; cur_req != NULL; cur_req = next_req) {
+        V9fsRequest *req = cur_req->data;
+        next_req = g_list_next(cur_req);
+
+        if (!req->done) {
+            continue;
+        }
+
+        post_op = &req->post_op;
+        post_op->func(post_op->arg);
+        p->requests = g_list_remove_link(p->requests, cur_req);
+        g_list_free(p->requests);
+    }
+}
+
+static inline void v9fs_thread_signal(void)
+{
+    struct V9fsPool *p = &v9fs_pool;
+    char byte = 0;
+    ssize_t ret;
+
+    do {
+        ret = write(p->wfd, &byte, sizeof(byte));
+    } while (ret == -1 && errno == EINTR);
+
+    if (ret < 0 && errno != EAGAIN) {
+        die("write() in v9fs");
+    }
+
+    if (kill(getpid(), SIGUSR2)) {
+        die("kill failed");
+    }
+}
+
+static void v9fs_thread_routine(gpointer data, gpointer user_data)
+{
+    V9fsRequest *req = data;
+
+    req->func(req);
+    v9fs_thread_signal();
+    req->done = 1;
+}
+
 static int omode_to_uflags(int8_t mode)
 {
     int ret = 0;
@@ -3850,7 +3935,8 @@  VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
     int i, len;
     struct stat stat;
     FsTypeEntry *fse;
-
+    int fds[2];
+    V9fsPool *p = &v9fs_pool;
 
     s = (V9fsState *)virtio_common_init("virtio-9p",
                                     VIRTIO_ID_9P,
@@ -3939,5 +4025,21 @@  VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
                         s->tag_len;
     s->vdev.get_config = virtio_9p_get_config;
 
+    if (qemu_pipe(fds) == -1) {
+        fprintf(stderr, "failed to create fd's for virtio-9p\n");
+        exit(1);
+    }
+
+    p->pool = g_thread_pool_new(v9fs_thread_routine, p, 8, FALSE, NULL);
+    p->rfd = fds[0];
+    p->wfd = fds[1];
+
+    fcntl(p->rfd, F_SETFL, O_NONBLOCK);
+    fcntl(p->wfd, F_SETFL, O_NONBLOCK);
+
+    qemu_set_fd_handler(p->rfd, v9fs_qemu_process_post_ops, NULL, NULL);
+
+    (void) v9fs_qemu_submit_request;
+
     return &s->vdev;
 }
diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
index 10809ba..e7d2326 100644
--- a/hw/9pfs/virtio-9p.h
+++ b/hw/9pfs/virtio-9p.h
@@ -124,6 +124,20 @@  struct V9fsPDU
     QLIST_ENTRY(V9fsPDU) next;
 };
 
+typedef struct V9fsPostOp {
+    /* Post Operation routine to execute after executing syscall */
+    void (*func)(void *arg);
+    void *arg;
+} V9fsPostOp;
+
+typedef struct V9fsRequest {
+    void (*func)(struct V9fsRequest *req);
+
+    /* Flag to indicate that request is satisfied, ready for post-processing */
+    int done;
+
+    V9fsPostOp post_op;
+} V9fsRequest;
 
 /* FIXME
  * 1) change user needs to set groups and stuff