
[6/6] Add helper functions to enable virtio-9p make use of the threadlets

Message ID 20101118180722.4434.29121.stgit@localhost6.localdomain6
State New

Commit Message

Arun Bharadwaj Nov. 18, 2010, 6:07 p.m. UTC
From: Gautham R Shenoy <ego@in.ibm.com>

Add infrastructure for offloading blocking tasks, such as POSIX calls, onto
the helper threads, and for handling the post_posix_operations() from the
context of the iothread. This frees the vcpu thread to process other guest
operations while the v9fs I/O is in progress.
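
As illustration only (this patch adds no callers yet; converting the v9fs
handlers is left for a follow-on series), a hypothetical handler might use
the new helpers roughly as follows. The V9fsStatState struct and the
v9fs_stat_* names are assumptions made purely to sketch the intended flow:

    typedef struct V9fsStatState {
        ThreadletWork work;          /* queued on the threadlet pool */
        void (*post_fn)(void *arg);  /* filled in by v9fs_do_async_posix() */
        char *path;
        struct stat stbuf;
        int err;
    } V9fsStatState;

    /* Runs on a threadlet (worker) thread: the blocking POSIX call. */
    static void v9fs_stat_do_lstat(ThreadletWork *work)
    {
        V9fsStatState *vs = container_of(work, V9fsStatState, work);

        vs->err = lstat(vs->path, &vs->stbuf);      /* may block */
        /* hand post-processing back to the iothread */
        v9fs_async_helper_done(vs->post_fn, vs);
    }

    /* Runs in the iothread: build the 9P reply and complete the request. */
    static void v9fs_stat_post_lstat(void *arg)
    {
        V9fsStatState *vs = arg;

        if (vs->err < 0) {
            /* ... return the error to the guest ... */
        }
        /* ... otherwise marshal vs->stbuf into the 9P reply ... */
    }

    /* Request path: offload the blocking call onto the threadlet pool. */
    static void v9fs_stat_offload(V9fsStatState *vs)
    {
        v9fs_do_async_posix(&vs->work, v9fs_stat_do_lstat,
                            &vs->post_fn, v9fs_stat_post_lstat);
    }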

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Sripathi Kodi <sripathik@in.ibm.com>
Signed-off-by: Arun R Bharadwaj <arun@linux.vnet.ibm.com>
---
 hw/virtio-9p.c     |  164 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 posix-aio-compat.c |   30 +++-------
 qemu-threadlets.c  |   21 +++++++
 qemu-threadlets.h  |    1 
 vl.c               |    3 +
 5 files changed, 196 insertions(+), 23 deletions(-)

Comments

Stefan Hajnoczi Nov. 19, 2010, 10:17 a.m. UTC | #1
On Thu, Nov 18, 2010 at 6:07 PM, Arun R Bharadwaj
<arun@linux.vnet.ibm.com> wrote:
> From: Gautham R Shenoy <ego@in.ibm.com>
>
> infrastructure for offloading blocking tasks such as making posix calls on
> to the helper threads and handle the post_posix_operations() from the
> context of the iothread. This frees the vcpu thread to process any other guest
> operations while the processing of v9fs_io is in progress.
>
> Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
> Signed-off-by: Sripathi Kodi <sripathik@in.ibm.com>
> Signed-off-by: Arun R Bharadwaj <arun@linux.vnet.ibm.com>
> ---
>  hw/virtio-9p.c     |  164 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  posix-aio-compat.c |   30 +++-------
>  qemu-threadlets.c  |   21 +++++++
>  qemu-threadlets.h  |    1
>  vl.c               |    3 +
>  5 files changed, 196 insertions(+), 23 deletions(-)

Is there code queued up which makes use of this in virtfs?  I ask
because at the moment this is dead code.

If you are restructuring this patch series, perhaps move the signal-related
changes into a separate commit - they are independent of virtfs.

Stefan
jvrao Nov. 30, 2010, 11:17 p.m. UTC | #2
On 11/19/2010 2:17 AM, Stefan Hajnoczi wrote:
> On Thu, Nov 18, 2010 at 6:07 PM, Arun R Bharadwaj
> <arun@linux.vnet.ibm.com> wrote:
>> From: Gautham R Shenoy <ego@in.ibm.com>
>>
>> infrastructure for offloading blocking tasks such as making posix calls on
>> to the helper threads and handle the post_posix_operations() from the
>> context of the iothread. This frees the vcpu thread to process any other guest
>> operations while the processing of v9fs_io is in progress.
>>
>> Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
>> Signed-off-by: Sripathi Kodi <sripathik@in.ibm.com>
>> Signed-off-by: Arun R Bharadwaj <arun@linux.vnet.ibm.com>
>> ---
>>  hw/virtio-9p.c     |  164 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>>  posix-aio-compat.c |   30 +++-------
>>  qemu-threadlets.c  |   21 +++++++
>>  qemu-threadlets.h  |    1
>>  vl.c               |    3 +
>>  5 files changed, 196 insertions(+), 23 deletions(-)
> 
> Is there code queued up which makes use of this in virtfs?  I ask
> because at the moment this is deadcode.
> 
> If you are restructuring this patch series, perhaps move the signal
> related changes into a commit - they are independent of virtfs.

Yep. Please separate out the signal handling code and make that patch part of
this series. The v9fs code should be part of a separate patch series where we
attempt to convert v9fs function calls to this threadlet model.

Thanks,
JV

> 
> Stefan
>

Patch

diff --git a/hw/virtio-9p.c b/hw/virtio-9p.c
index 7c59988..88da20f 100644
--- a/hw/virtio-9p.c
+++ b/hw/virtio-9p.c
@@ -18,6 +18,7 @@ 
 #include "fsdev/qemu-fsdev.h"
 #include "virtio-9p-debug.h"
 #include "virtio-9p-xattr.h"
+#include "qemu-threadlets.h"
 
 int debug_9p_pdu;
 
@@ -33,6 +34,149 @@  enum {
     Oappend = 0x80,
 };
 
+typedef struct V9fsPostOp {
+    QTAILQ_ENTRY(V9fsPostOp) node;
+    void (*func)(void *arg);
+    void *arg;
+} V9fsPostOp;
+
+static struct {
+    int rfd;
+    int wfd;
+    QemuMutex lock;
+    QTAILQ_HEAD(, V9fsPostOp) post_op_list;
+} v9fs_async_struct;
+
+static void die2(int err, const char *what)
+{
+    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
+    abort();
+}
+
+static void die(const char *what)
+{
+    die2(errno, what);
+}
+
+#define ASYNC_MAX_PROCESS   5
+
+/**
+ * v9fs_process_post_ops: Process any pending v9fs_post_posix_operation
+ * @arg: Not used.
+ *
+ * This function serves as a callback in the iothread, invoked whenever
+ * v9fs_async_struct.wfd is written to. It goes through the list of
+ * v9fs_post_posix_operations() and executes them. In the process, it might
+ * queue more jobs on the asynchronous thread pool.
+ */
+static void v9fs_process_post_ops(void *arg)
+{
+    int count = 0;
+    struct V9fsPostOp *post_op;
+    int ret;
+    char byte;
+
+    do {
+        ret = read(v9fs_async_struct.rfd, &byte, sizeof(byte));
+    } while (ret > 0 || (ret == -1 && errno == EINTR));
+
+    qemu_mutex_lock(&v9fs_async_struct.lock);
+    for (count = 0; count < ASYNC_MAX_PROCESS; count++) {
+        if (QTAILQ_EMPTY(&(v9fs_async_struct.post_op_list))) {
+            break;
+        }
+        post_op = QTAILQ_FIRST(&(v9fs_async_struct.post_op_list));
+        QTAILQ_REMOVE(&(v9fs_async_struct.post_op_list), post_op, node);
+
+        qemu_mutex_unlock(&v9fs_async_struct.lock);
+        post_op->func(post_op->arg);
+        qemu_free(post_op);
+        qemu_mutex_lock(&v9fs_async_struct.lock);
+    }
+    qemu_mutex_unlock(&v9fs_async_struct.lock);
+}
+
+/**
+ * v9fs_async_signal: Inform the io-thread of completion of async job.
+ *
+ * This function is used to inform the iothread that a particular
+ * async-operation pertaining to v9fs has been completed and that the io thread
+ * can handle the v9fs_post_posix_operation.
+ *
+ * This is based on the aio_signal_handler
+ */
+static inline void v9fs_async_signal(void)
+{
+    char byte = 0;
+    ssize_t ret;
+    int tries = 0;
+
+    qemu_mutex_lock(&v9fs_async_struct.lock);
+    do {
+        assert(tries != 100);
+        ret = write(v9fs_async_struct.wfd, &byte, sizeof(byte));
+        tries++;
+    } while (ret < 0 && errno == EAGAIN);
+    qemu_mutex_unlock(&v9fs_async_struct.lock);
+
+    if (ret < 0 && errno != EAGAIN) {
+        die("write() in v9fs");
+    }
+
+    if (kill(getpid(), SIGUSR2)) {
+        die("kill failed");
+    }
+}
+
+/**
+ * v9fs_async_helper_done: Marks the completion of the v9fs_async job
+ * @func: v9fs_post_posix_func() for post-processing invoked in the context of
+ *        the io-thread
+ * @arg: Argument to func.
+ *
+ * This function is called from the context of one of the asynchronous threads
+ * in the thread pool. This is called when the asynchronous thread has finished
+ * executing a v9fs_posix_operation. Its purpose is to initiate the process of
+ * informing the io-thread that the v9fs_posix_operation has completed.
+ */
+static void v9fs_async_helper_done(void (*func)(void *arg), void *arg)
+{
+    struct V9fsPostOp *post_op;
+
+    post_op = qemu_mallocz(sizeof(*post_op));
+    post_op->func = func;
+    post_op->arg = arg;
+
+    qemu_mutex_lock(&v9fs_async_struct.lock);
+    QTAILQ_INSERT_TAIL(&(v9fs_async_struct.post_op_list), post_op, node);
+    qemu_mutex_unlock(&v9fs_async_struct.lock);
+
+    v9fs_async_signal();
+}
+
+/**
+ * v9fs_do_async_posix: Offload v9fs_posix_operation onto async thread.
+ * @work: The ThreadletWork item for this operation.
+ * @posix_fn: The posix function which has to be offloaded onto async thread.
+ * @post_fn_ptr: Address of the location to hold the post_fn corresponding to
+ * the posix_fn
+ * @post_fn: The post processing function corresponding to the posix_fn.
+ *
+ * This function is a helper to offload a posix_operation onto the asynchronous
+ * thread pool. It sets up the association with the post-processing function
+ * that needs to be invoked from the context of the iothread once the posix_fn
+ * has been executed.
+ */
+static void v9fs_do_async_posix(ThreadletWork *work,
+                                void (*posix_fn)(ThreadletWork *work),
+                                void (**post_fn_ptr)(void *arg),
+                                void (*post_fn)(void *arg))
+{
+    *post_fn_ptr = post_fn;
+    work->func = posix_fn;
+    submit_work(work);
+}
+
 static int omode_to_uflags(int8_t mode)
 {
     int ret = 0;
@@ -3657,7 +3801,7 @@  VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
     int i, len;
     struct stat stat;
     FsTypeEntry *fse;
-
+    int fds[2];
 
     s = (V9fsState *)virtio_common_init("virtio-9p",
                                     VIRTIO_ID_9P,
@@ -3740,5 +3884,23 @@  VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
                         s->tag_len;
     s->vdev.get_config = virtio_9p_get_config;
 
+    if (qemu_pipe(fds) == -1) {
+        fprintf(stderr, "failed to create fd's for virtio-9p\n");
+        exit(1);
+    }
+
+    v9fs_async_struct.rfd = fds[0];
+    v9fs_async_struct.wfd = fds[1];
+
+    fcntl(fds[0], F_SETFL, O_NONBLOCK);
+    fcntl(fds[1], F_SETFL, O_NONBLOCK);
+
+    qemu_set_fd_handler(fds[0], v9fs_process_post_ops, NULL, NULL);
+    QTAILQ_INIT(&v9fs_async_struct.post_op_list);
+    qemu_mutex_init(&(v9fs_async_struct.lock));
+
+    (void)v9fs_do_async_posix;
+    (void)v9fs_async_helper_done;
+
     return &s->vdev;
 }
diff --git a/posix-aio-compat.c b/posix-aio-compat.c
index 20f53c7..9639aaa 100644
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -256,11 +256,14 @@  static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
     return nbytes;
 }
 
+static PosixAioState *posix_aio_state;
+
 static void aio_thread(ThreadletWork *work)
 {
     pid_t pid;
     struct qemu_paiocb *aiocb = container_of(work, struct qemu_paiocb, work);
     ssize_t ret = 0;
+    char byte = 0;
 
     pid = getpid();
 
@@ -286,6 +289,11 @@  static void aio_thread(ThreadletWork *work)
     qemu_cond_broadcast(&aiocb_completion);
     qemu_mutex_unlock(&aiocb_mutex);
 
+    ret = write(posix_aio_state->wfd, &byte, sizeof(byte));
+    if (ret < 0 && errno != EAGAIN) {
+        die("write()");
+    }
+
     if (kill(pid, aiocb->ev_signo)) {
         die("kill failed");
     }
@@ -404,22 +412,6 @@  static int posix_aio_flush(void *opaque)
     return !!s->first_aio;
 }
 
-static PosixAioState *posix_aio_state;
-
-static void aio_signal_handler(int signum)
-{
-    if (posix_aio_state) {
-        char byte = 0;
-        ssize_t ret;
-
-        ret = write(posix_aio_state->wfd, &byte, sizeof(byte));
-        if (ret < 0 && errno != EAGAIN)
-            die("write()");
-    }
-
-    qemu_service_io();
-}
-
 static void paio_remove(struct qemu_paiocb *acb)
 {
     struct qemu_paiocb **pacb;
@@ -514,7 +506,6 @@  BlockDriverAIOCB *paio_ioctl(BlockDriverState *bs, int fd,
 
 int paio_init(void)
 {
-    struct sigaction act;
     PosixAioState *s;
     int fds[2];
 
@@ -526,11 +517,6 @@  int paio_init(void)
 
     s = qemu_malloc(sizeof(PosixAioState));
 
-    sigfillset(&act.sa_mask);
-    act.sa_flags = 0; /* do not restart syscalls to interrupt select() */
-    act.sa_handler = aio_signal_handler;
-    sigaction(SIGUSR2, &act, NULL);
-
     s->first_aio = NULL;
     if (qemu_pipe(fds) == -1) {
         fprintf(stderr, "failed to create pipe\n");
diff --git a/qemu-threadlets.c b/qemu-threadlets.c
index 23b4ecf..1e78f56 100644
--- a/qemu-threadlets.c
+++ b/qemu-threadlets.c
@@ -16,12 +16,28 @@ 
 
 #include "qemu-threadlets.h"
 #include "osdep.h"
+#include <signal.h>
 
 #define MAX_GLOBAL_THREADS  64
 #define MIN_GLOBAL_THREADS   8
 static ThreadletQueue globalqueue;
 static int globalqueue_init;
 
+static void threadlet_io_completion_signal_handler(int signum)
+{
+    qemu_service_io();
+}
+
+static void threadlet_register_signal_handler(void)
+{
+    struct sigaction act;
+
+    sigfillset(&act.sa_mask);
+    act.sa_flags = 0; /* do not restart syscalls to interrupt select() */
+    act.sa_handler = threadlet_io_completion_signal_handler;
+    sigaction(SIGUSR2, &act, NULL);
+}
+
 static void *threadlet_worker(void *data)
 {
     ThreadletQueue *queue = data;
@@ -166,3 +182,8 @@  void threadlet_queue_init(ThreadletQueue *queue,
     qemu_mutex_init(&queue->lock);
     qemu_cond_init(&queue->cond);
 }
+
+void threadlet_init(void)
+{
+    threadlet_register_signal_handler();
+}
diff --git a/qemu-threadlets.h b/qemu-threadlets.h
index 39461a5..f26c5b7 100644
--- a/qemu-threadlets.h
+++ b/qemu-threadlets.h
@@ -43,4 +43,5 @@  int dequeue_work_on_queue(ThreadletQueue *queue, ThreadletWork *work);
 int dequeue_work(ThreadletWork *work);
 void threadlet_queue_init(ThreadletQueue *queue, int max_threads,
                           int min_threads);
+void threadlet_init(void);
 #endif
diff --git a/vl.c b/vl.c
index df414ef..7b9a425 100644
--- a/vl.c
+++ b/vl.c
@@ -148,6 +148,7 @@  int main(int argc, char **argv)
 #include "qemu-config.h"
 #include "qemu-objects.h"
 #include "qemu-options.h"
+#include "qemu-threadlets.h"
 #ifdef CONFIG_VIRTFS
 #include "fsdev/qemu-fsdev.h"
 #endif
@@ -2922,6 +2923,8 @@  int main(int argc, char **argv, char **envp)
             exit(1);
     }
 
+    threadlet_init();
+
     /* init generic devices */
     if (qemu_opts_foreach(qemu_find_opts("device"), device_init_func, NULL, 1) != 0)
         exit(1);