
[V4,04/13] hw/9pfs: File system helper process for qemu 9p proxy FS

Message ID 1323101930-27163-5-git-send-email-mohan@in.ibm.com
State New

Commit Message

M. Mohan Kumar Dec. 5, 2011, 4:18 p.m. UTC
From: "M. Mohan Kumar" <mohan@in.ibm.com>

Provide root-privileged access to the QEMU 9p proxy filesystem using
socket communication.

The proxy helper is started by the root user as:
~ # virtfs-proxy-helper -f|--fd <socket descriptor> -p|--path <path-to-share>

Signed-off-by: M. Mohan Kumar <mohan@in.ibm.com>
---
 Makefile                    |    3 +
 configure                   |   19 +++
 fsdev/virtfs-proxy-helper.c |  308 +++++++++++++++++++++++++++++++++++++++++++
 hw/9pfs/virtio-9p-proxy.h   |    9 ++
 4 files changed, 339 insertions(+), 0 deletions(-)
 create mode 100644 fsdev/virtfs-proxy-helper.c
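
The commit message above shows the helper receiving an already-open socket
descriptor via -f/--fd. Purely as an illustration (not part of this patch),
the sketch below shows one way a parent process could create that descriptor
and launch the helper; the socketpair() approach, the /tmp/share path, and
the foreground -n flag are assumptions for the example.

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
    int sv[2];
    char fdstr[16];

    /* connected socket pair; the helper inherits sv[1] across exec */
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
        perror("socketpair");
        return EXIT_FAILURE;
    }
    if (fork() == 0) {
        close(sv[0]);
        snprintf(fdstr, sizeof(fdstr), "%d", sv[1]);
        /* /tmp/share is a placeholder; -n keeps the helper in the foreground */
        execlp("virtfs-proxy-helper", "virtfs-proxy-helper",
               "-n", "-f", fdstr, "-p", "/tmp/share", (char *)NULL);
        perror("execlp");
        _exit(EXIT_FAILURE);
    }
    close(sv[1]);
    /* the parent would now speak the proxy protocol over sv[0] */
    return 0;
}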

Comments

Stefan Hajnoczi Dec. 8, 2011, 6:31 p.m. UTC | #1
On Mon, Dec 05, 2011 at 09:48:41PM +0530, M. Mohan Kumar wrote:
> +static int read_request(int sockfd, struct iovec *iovec, ProxyHeader *header)
> +{
> +    int retval;
> +
> +    /*
> +     * read the request header.
> +     */
> +    iovec->iov_len = 0;
> +    retval = socket_read(sockfd, iovec->iov_base, PROXY_HDR_SZ);
> +    if (retval < 0) {
> +        return retval;
> +    }
> +    iovec->iov_len = PROXY_HDR_SZ;
> +    retval = proxy_unmarshal(iovec, 0, "dd", &header->type, &header->size);
> +    if (retval < 0) {
> +        return retval;
> +    }
> +    /*
> +     * We can't process message.size > PROXY_MAX_IO_SZ, read the complete
> +     * message from the socket and ignore it. This ensures that
> +     * we can correctly handle the next request. We also return
> +     * ENOBUFS as error to indicate we ran out of buffer space.
> +     */
> +    if (header->size > PROXY_MAX_IO_SZ) {
> +        int count, size;
> +        size = header->size;
> +        while (size > 0) {
> +            count = MIN(PROXY_MAX_IO_SZ, size);
> +            count = socket_read(sockfd, iovec->iov_base + PROXY_HDR_SZ, count);
> +            if (count < 0) {
> +                return count;
> +            }
> +            size -= count;
> +        }

I'm not sure recovery attempts are worthwhile here.  The client is
buggy, perhaps just refuse further work.

> +        return -ENOBUFS;
> +    }

header->size is (signed) int and we didn't check for header->size < 0.
Please use an unsigned type.
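
A minimal sketch of the check with an unsigned size field, as suggested
above; ProxyHeaderU and check_request_size() are illustrative names, not
the actual change made in a later version of the series.

#include <errno.h>

#define PROXY_MAX_IO_SZ (64 * 1024)   /* matches hw/9pfs/virtio-9p-proxy.h */

/* hypothetical unsigned variant of ProxyHeader, for illustration only */
typedef struct {
    int type;
    unsigned int size;                /* was: int size */
} ProxyHeaderU;

static int check_request_size(const ProxyHeaderU *header)
{
    /* with an unsigned field, a negative wire value cannot slip past this */
    if (header->size > PROXY_MAX_IO_SZ) {
        return -ENOBUFS;
    }
    return 0;
}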

> +    if (chroot(rpath) < 0) {
> +        do_perror("chroot");
> +        goto error;
> +    }
> +    umask(0);

We haven't changed into the chroot yet, we need chdir("/").  Otherwise
the current working directory is outside the chroot (and allows trivial
escape).
Mohan Kumar M Dec. 9, 2011, 4:42 p.m. UTC | #2
On Friday, December 09, 2011 12:01:14 AM Stefan Hajnoczi wrote:
> On Mon, Dec 05, 2011 at 09:48:41PM +0530, M. Mohan Kumar wrote:
> > +static int read_request(int sockfd, struct iovec *iovec, ProxyHeader *header)
> > +{
> > +    int retval;
> > +
> > +    /*
> > +     * read the request header.
> > +     */
> > +    iovec->iov_len = 0;
> > +    retval = socket_read(sockfd, iovec->iov_base, PROXY_HDR_SZ);
> > +    if (retval < 0) {
> > +        return retval;
> > +    }
> > +    iovec->iov_len = PROXY_HDR_SZ;
> > +    retval = proxy_unmarshal(iovec, 0, "dd", &header->type, &header->size);
> > +    if (retval < 0) {
> > +        return retval;
> > +    }
> > +    /*
> > +     * We can't process message.size > PROXY_MAX_IO_SZ, read the complete
> > +     * message from the socket and ignore it. This ensures that
> > +     * we can correctly handle the next request. We also return
> > +     * ENOBUFS as error to indicate we ran out of buffer space.
> > +     */
> > +    if (header->size > PROXY_MAX_IO_SZ) {
> > +        int count, size;
> > +        size = header->size;
> > +        while (size > 0) {
> > +            count = MIN(PROXY_MAX_IO_SZ, size);
> > +            count = socket_read(sockfd, iovec->iov_base + PROXY_HDR_SZ, count);
> > +            if (count < 0) {
> > +                return count;
> > +            }
> > +            size -= count;
> > +        }
> 
> I'm not sure recovery attempts are worthwhile here.  The client is
> buggy, perhaps just refuse further work.

But what's the issue in trying to recover in this case?
> 
> > +        return -ENOBUFS;
> > +    }
> 
> header->size is (signed) int and we didn't check for header->size < 0.
> Please use an unsigned type.

I will fix it in the next version.

> 
> > +    if (chroot(rpath) < 0) {
> > +        do_perror("chroot");
> > +        goto error;
> > +    }
> > +    umask(0);
> 
> We haven't changed into the chroot yet, we need chdir("/").  Otherwise
> the current working directory is outside the chroot (and allows trivial
> escape).
I will fix it in the next version.
Stefan Hajnoczi Dec. 12, 2011, 12:08 p.m. UTC | #3
On Fri, Dec 09, 2011 at 10:12:17PM +0530, M. Mohan Kumar wrote:
> On Friday, December 09, 2011 12:01:14 AM Stefan Hajnoczi wrote:
> > On Mon, Dec 05, 2011 at 09:48:41PM +0530, M. Mohan Kumar wrote:
> > > +static int read_request(int sockfd, struct iovec *iovec, ProxyHeader *header)
> > > +{
> > > +    int retval;
> > > +
> > > +    /*
> > > +     * read the request header.
> > > +     */
> > > +    iovec->iov_len = 0;
> > > +    retval = socket_read(sockfd, iovec->iov_base, PROXY_HDR_SZ);
> > > +    if (retval < 0) {
> > > +        return retval;
> > > +    }
> > > +    iovec->iov_len = PROXY_HDR_SZ;
> > > +    retval = proxy_unmarshal(iovec, 0, "dd", &header->type, &header->size);
> > > +    if (retval < 0) {
> > > +        return retval;
> > > +    }
> > > +    /*
> > > +     * We can't process message.size > PROXY_MAX_IO_SZ, read the complete
> > > +     * message from the socket and ignore it. This ensures that
> > > +     * we can correctly handle the next request. We also return
> > > +     * ENOBUFS as error to indicate we ran out of buffer space.
> > > +     */
> > > +    if (header->size > PROXY_MAX_IO_SZ) {
> > > +        int count, size;
> > > +        size = header->size;
> > > +        while (size > 0) {
> > > +            count = MIN(PROXY_MAX_IO_SZ, size);
> > > +            count = socket_read(sockfd, iovec->iov_base + PROXY_HDR_SZ, count);
> > > +            if (count < 0) {
> > > +                return count;
> > > +            }
> > > +            size -= count;
> > > +        }
> > 
> > I'm not sure recovery attempts are worthwhile here.  The client is
> > buggy, perhaps just refuse further work.
> 
> But what's the issue in trying to recover in this case?

This recovery procedure is not robust because it does not always work.
In fact it only works in the case where the header->size field was
out-of-range but accurate.  That's not a likely case since the QEMU-side
code that you are writing should handle this.

If the nature of the invalid request is different, for example a broken
or malicious client that does not send an accurate header->size, then
we're stuck in this special-case recovery trying to gobble bytes and we
never log an error.

A real recovery would be something like disconnecting and
re-establishing the connection between QEMU and the helper.  This would
allow us to get back to a clean state in all cases.

Stefan
Aneesh Kumar K.V Dec. 12, 2011, 3:21 p.m. UTC | #4
On Mon, 12 Dec 2011 12:08:33 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Fri, Dec 09, 2011 at 10:12:17PM +0530, M. Mohan Kumar wrote:
> > On Friday, December 09, 2011 12:01:14 AM Stefan Hajnoczi wrote:
> > > On Mon, Dec 05, 2011 at 09:48:41PM +0530, M. Mohan Kumar wrote:
> > > > +static int read_request(int sockfd, struct iovec *iovec, ProxyHeader *header)
> > > > +{
> > > > +    int retval;
> > > > +
> > > > +    /*
> > > > +     * read the request header.
> > > > +     */
> > > > +    iovec->iov_len = 0;
> > > > +    retval = socket_read(sockfd, iovec->iov_base, PROXY_HDR_SZ);
> > > > +    if (retval < 0) {
> > > > +        return retval;
> > > > +    }
> > > > +    iovec->iov_len = PROXY_HDR_SZ;
> > > > +    retval = proxy_unmarshal(iovec, 0, "dd", &header->type, &header->size);
> > > > +    if (retval < 0) {
> > > > +        return retval;
> > > > +    }
> > > > +    /*
> > > > +     * We can't process message.size > PROXY_MAX_IO_SZ, read the complete
> > > > +     * message from the socket and ignore it. This ensures that
> > > > +     * we can correctly handle the next request. We also return
> > > > +     * ENOBUFS as error to indicate we ran out of buffer space.
> > > > +     */
> > > > +    if (header->size > PROXY_MAX_IO_SZ) {
> > > > +        int count, size;
> > > > +        size = header->size;
> > > > +        while (size > 0) {
> > > > +            count = MIN(PROXY_MAX_IO_SZ, size);
> > > > +            count = socket_read(sockfd, iovec->iov_base + PROXY_HDR_SZ, count);
> > > > +            if (count < 0) {
> > > > +                return count;
> > > > +            }
> > > > +            size -= count;
> > > > +        }
> > > 
> > > I'm not sure recovery attempts are worthwhile here.  The client is
> > > buggy, perhaps just refuse further work.
> > 
> > But what's the issue in trying to recover in this case?
> 
> This recovery procedure is not robust because it does not always work.
> In fact it only works in the case where the header->size field was
> out-of-range but accurate.  That's not a likely case since the QEMU-side
> code that you are writing should handle this.
> 
> If the nature of the invalid request is different, for example a broken
> or malicious client that does not send an accurate header->size, then
> we're stuck in this special-case recovery trying to gobble bytes and we
> never log an error.
> 
> A real recovery would be something like disconnecting and
> re-establishing the connection between QEMU and the helper.  This would
> allow us to get back to a clean state in all cases.
> 

Since we don't keep any state in the proxy helper, returning ENOBUFS
should be equivalent to the above, right? One of the reasons to try to
recover as much as possible is to make sure the guest can umount the
file system properly. That is, if we hit this error condition due to a
bug in the proxy FS driver in QEMU, we want to make sure we return some
valid error, which will at least enable the guest/client to do an umount.

-aneesh
Stefan Hajnoczi Dec. 12, 2011, 3:56 p.m. UTC | #5
On Mon, Dec 12, 2011 at 3:21 PM, Aneesh Kumar K.V
<aneesh.kumar@linux.vnet.ibm.com> wrote:
> On Mon, 12 Dec 2011 12:08:33 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
>> On Fri, Dec 09, 2011 at 10:12:17PM +0530, M. Mohan Kumar wrote:
>> > On Friday, December 09, 2011 12:01:14 AM Stefan Hajnoczi wrote:
>> > > On Mon, Dec 05, 2011 at 09:48:41PM +0530, M. Mohan Kumar wrote:
>> > > > +static int read_request(int sockfd, struct iovec *iovec, ProxyHeader *header)
>> > > > +{
>> > > > +    int retval;
>> > > > +
>> > > > +    /*
>> > > > +     * read the request header.
>> > > > +     */
>> > > > +    iovec->iov_len = 0;
>> > > > +    retval = socket_read(sockfd, iovec->iov_base, PROXY_HDR_SZ);
>> > > > +    if (retval < 0) {
>> > > > +        return retval;
>> > > > +    }
>> > > > +    iovec->iov_len = PROXY_HDR_SZ;
>> > > > +    retval = proxy_unmarshal(iovec, 0, "dd", &header->type, &header->size);
>> > > > +    if (retval < 0) {
>> > > > +        return retval;
>> > > > +    }
>> > > > +    /*
>> > > > +     * We can't process message.size > PROXY_MAX_IO_SZ, read the complete
>> > > > +     * message from the socket and ignore it. This ensures that
>> > > > +     * we can correctly handle the next request. We also return
>> > > > +     * ENOBUFS as error to indicate we ran out of buffer space.
>> > > > +     */
>> > > > +    if (header->size > PROXY_MAX_IO_SZ) {
>> > > > +        int count, size;
>> > > > +        size = header->size;
>> > > > +        while (size > 0) {
>> > > > +            count = MIN(PROXY_MAX_IO_SZ, size);
>> > > > +            count = socket_read(sockfd, iovec->iov_base + PROXY_HDR_SZ, count);
>> > > > +            if (count < 0) {
>> > > > +                return count;
>> > > > +            }
>> > > > +            size -= count;
>> > > > +        }
>> > >
>> > > I'm not sure recovery attempts are worthwhile here.  The client is
>> > > buggy, perhaps just refuse further work.
>> >
>> > But what's the issue in trying to recover in this case?
>>
>> This recovery procedure is not robust because it does not always work.
>> In fact it only works in the case where the header->size field was
>> out-of-range but accurate.  That's not a likely case since the QEMU-side
>> code that you are writing should handle this.
>>
>> If the nature of the invalid request is different, for example a broken
>> or malicious client that does not send an accurate header->size, then
>> we're stuck in this special-case recovery trying to gobble bytes and we
>> never log an error.
>>
>> A real recovery would be something like disconnecting and
>> re-establishing the connection between QEMU and the helper.  This would
>> allow us to get back to a clean state in all cases.
>>
>
> Since we don't keep any state in the proxy helper, returning ENOBUFS
> should be equivalent to the above, right? One of the reasons to try to
> recover as much as possible is to make sure the guest can umount the
> file system properly. That is, if we hit this error condition due to a
> bug in the proxy FS driver in QEMU, we want to make sure we return some
> valid error, which will at least enable the guest/client to do an umount.

When the helper detects something outside the protocol specification,
it needs to terminate the connection.  The protocol has no reliable
way to skip the junk coming over the socket, so we can't process the
"next" message.

The flipside to "try to recover as much as possible" is "damage as
little as possible".  We don't want to mis-interpret requests on this
broken connection and corrupt the user's data.

I'm happy with any scheme as long as it handles all error cases.  The
problem with the -ENOBUFS case was that it is pretty artificial
(unlikely to happen) and doesn't handle cases where header->size is
inaccurate.

Stefan
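
A rough sketch of the terminate-on-violation behaviour suggested above,
assuming the declarations from fsdev/virtfs-proxy-helper.c (read_request(),
do_log(), ProxyHeader) are in scope; handle_request() is a hypothetical
dispatcher, not a function from the patch.

static int process_requests_strict(int sock, struct iovec *in_iovec)
{
    ProxyHeader header;
    int retval;

    while (1) {
        retval = read_request(sock, in_iovec, &header);
        if (retval < 0) {
            /* protocol violation or I/O error: refuse further work */
            do_log(LOG_CRIT, "invalid request, closing connection\n");
            close(sock);
            return retval;
        }
        handle_request(sock, in_iovec, &header);   /* hypothetical */
    }
}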

Patch

diff --git a/Makefile b/Makefile
index 301c75e..1906c5e 100644
--- a/Makefile
+++ b/Makefile
@@ -154,6 +154,9 @@  qemu-img$(EXESUF): qemu-img.o $(tools-obj-y) $(block-obj-y)
 qemu-nbd$(EXESUF): qemu-nbd.o $(tools-obj-y) $(block-obj-y)
 qemu-io$(EXESUF): qemu-io.o cmd.o $(tools-obj-y) $(block-obj-y)
 
+fsdev/virtfs-proxy-helper$(EXESUF): fsdev/virtfs-proxy-helper.o fsdev/virtio-9p-marshal.o
+fsdev/virtfs-proxy-helper$(EXESUF): LIBS += -lcap
+
 qemu-img-cmds.h: $(SRC_PATH)/qemu-img-cmds.hx
 	$(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -h < $< > $@,"  GEN   $@")
 
diff --git a/configure b/configure
index ac4840d..4ecdb1c 100755
--- a/configure
+++ b/configure
@@ -1938,6 +1938,22 @@  else
 fi
 
 ##########################################
+# libcap probe
+
+if test "$cap" != "no" ; then
+  cat > $TMPC <<EOF
+#include <stdio.h>
+#include <sys/capability.h>
+int main(void) { cap_t caps; caps = cap_init(); }
+EOF
+  if compile_prog "" "-lcap" ; then
+    cap=yes
+  else
+    cap=no
+  fi
+fi
+
+##########################################
 # pthread probe
 PTHREADLIBS_LIST="-pthread -lpthread -lpthreadGC2"
 
@@ -2735,6 +2751,9 @@  confdir=$sysconfdir$confsuffix
 tools=
 if test "$softmmu" = yes ; then
   tools="qemu-img\$(EXESUF) qemu-io\$(EXESUF) $tools"
+  if [ "$cap" = "yes" -a "$linux" = "yes" ] ; then
+      tools="$tools fsdev/virtfs-proxy-helper\$(EXESUF)"
+  fi
   if [ "$linux" = "yes" -o "$bsd" = "yes" -o "$solaris" = "yes" ] ; then
       tools="qemu-nbd\$(EXESUF) $tools"
     if [ "$guest_agent" = "yes" ]; then
diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c
new file mode 100644
index 0000000..7670a0e
--- /dev/null
+++ b/fsdev/virtfs-proxy-helper.c
@@ -0,0 +1,308 @@ 
+/*
+ * Helper for QEMU Proxy FS Driver
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * M. Mohan Kumar <mohan@in.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+#include <stdio.h>
+#include <string.h>
+#include <sys/un.h>
+#include <limits.h>
+#include <signal.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <getopt.h>
+#include <unistd.h>
+#include <syslog.h>
+#include <sys/capability.h>
+#include <sys/fsuid.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include "qemu-common.h"
+#include "virtio-9p-marshal.h"
+#include "hw/9pfs/virtio-9p-proxy.h"
+
+#define PROGNAME "virtfs-proxy-helper"
+
+static struct option helper_opts[] = {
+    {"fd", required_argument, NULL, 'f'},
+    {"path", required_argument, NULL, 'p'},
+    {"nodaemon", no_argument, NULL, 'n'},
+};
+
+static bool is_daemon;
+
+static void do_log(int loglevel, const char *format, ...)
+{
+    va_list ap;
+
+    va_start(ap, format);
+    if (is_daemon) {
+        vsyslog(LOG_CRIT, format, ap);
+    } else {
+        vfprintf(stderr, format, ap);
+    }
+    va_end(ap);
+}
+
+static void do_perror(const char *string)
+{
+    if (is_daemon) {
+        syslog(LOG_CRIT, "%s:%s", string, strerror(errno));
+    } else {
+        fprintf(stderr, "%s:%s\n", string, strerror(errno));
+    }
+}
+
+static int do_cap_set(cap_value_t *cap_value, int size, int reset)
+{
+    cap_t caps;
+    if (reset) {
+        /*
+         * Start with an empty set and set permitted and effective
+         */
+        caps = cap_init();
+        if (caps == NULL) {
+            do_perror("cap_init");
+            return -1;
+        }
+        if (cap_set_flag(caps, CAP_PERMITTED, size, cap_value, CAP_SET) < 0) {
+            do_perror("cap_set_flag");
+            goto error;
+        }
+    } else {
+        caps = cap_get_proc();
+        if (!caps) {
+            do_perror("cap_get_proc");
+            return -1;
+        }
+    }
+    if (cap_set_flag(caps, CAP_EFFECTIVE, size, cap_value, CAP_SET) < 0) {
+        do_perror("cap_set_flag");
+        goto error;
+    }
+    if (cap_set_proc(caps) < 0) {
+        do_perror("cap_set_proc");
+        goto error;
+    }
+    cap_free(caps);
+    return 0;
+
+error:
+    cap_free(caps);
+    return -1;
+}
+
+static int init_capabilities(void)
+{
+    /* helper needs only the following capabilities */
+    cap_value_t cap_list[] = {
+        CAP_CHOWN,
+        CAP_DAC_OVERRIDE,
+        CAP_FOWNER,
+        CAP_FSETID,
+        CAP_SETGID,
+        CAP_MKNOD,
+        CAP_SETUID,
+    };
+    return do_cap_set(cap_list, ARRAY_SIZE(cap_list), 1);
+}
+
+static int socket_read(int sockfd, void *buff, ssize_t size)
+{
+    ssize_t retval, total = 0;
+
+    while (size) {
+        retval = read(sockfd, buff, size);
+        if (retval == 0) {
+            return -EIO;
+        }
+        if (retval < 0) {
+            if (errno == EINTR) {
+                continue;
+            }
+            return -errno;
+        }
+        size -= retval;
+        buff += retval;
+        total += retval;
+    }
+    return total;
+}
+
+static int socket_write(int sockfd, void *buff, ssize_t size)
+{
+    ssize_t retval, total = 0;
+
+    while (size) {
+        retval = write(sockfd, buff, size);
+        if (retval < 0) {
+            if (errno == EINTR) {
+                continue;
+            }
+            return -errno;
+        }
+        size -= retval;
+        buff += retval;
+        total += retval;
+    }
+    return total;
+}
+
+static int read_request(int sockfd, struct iovec *iovec, ProxyHeader *header)
+{
+    int retval;
+
+    /*
+     * read the request header.
+     */
+    iovec->iov_len = 0;
+    retval = socket_read(sockfd, iovec->iov_base, PROXY_HDR_SZ);
+    if (retval < 0) {
+        return retval;
+    }
+    iovec->iov_len = PROXY_HDR_SZ;
+    retval = proxy_unmarshal(iovec, 0, "dd", &header->type, &header->size);
+    if (retval < 0) {
+        return retval;
+    }
+    /*
+     * We can't process message.size > PROXY_MAX_IO_SZ, read the complete
+     * message from the socket and ignore it. This ensures that
+     * we can correctly handle the next request. We also return
+     * ENOBUFS as error to indicate we ran out of buffer space.
+     */
+    if (header->size > PROXY_MAX_IO_SZ) {
+        int count, size;
+        size = header->size;
+        while (size > 0) {
+            count = MIN(PROXY_MAX_IO_SZ, size);
+            count = socket_read(sockfd, iovec->iov_base + PROXY_HDR_SZ, count);
+            if (count < 0) {
+                return count;
+            }
+            size -= count;
+        }
+        return -ENOBUFS;
+    }
+    retval = socket_read(sockfd, iovec->iov_base + PROXY_HDR_SZ, header->size);
+    if (retval < 0) {
+        return retval;
+    }
+    iovec->iov_len += header->size;
+    return 0;
+}
+
+static void usage(char *prog)
+{
+    fprintf(stderr, "usage: %s\n"
+            " -p|--path <path> 9p path to export\n"
+            " {-f|--fd <socket-descriptor>} socket file descriptor to be used\n"
+            " [-n|--nodaemon] Run as a normal program\n",
+            basename(prog));
+}
+
+static int process_requests(int sock)
+{
+    int retval;
+    ProxyHeader header;
+    struct iovec in_iovec;
+
+    in_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ);
+    in_iovec.iov_len  = PROXY_MAX_IO_SZ + PROXY_HDR_SZ;
+    while (1) {
+        retval = read_request(sock, &in_iovec, &header);
+        if (retval < 0) {
+            goto error;
+        }
+    }
+    (void)socket_write;
+error:
+    g_free(in_iovec.iov_base);
+    return -1;
+}
+
+int main(int argc, char **argv)
+{
+    int sock;
+    char *rpath = NULL;
+    struct stat stbuf;
+    int c, option_index;
+
+    is_daemon = true;
+    sock = -1;
+    while (1) {
+        option_index = 0;
+        c = getopt_long(argc, argv, "p:nh?f:", helper_opts,
+                        &option_index);
+        if (c == -1) {
+            break;
+        }
+        switch (c) {
+        case 'p':
+            rpath = strdup(optarg);
+            break;
+        case 'n':
+            is_daemon = false;
+            break;
+        case 'f':
+            sock = atoi(optarg);
+            break;
+        case '?':
+        case 'h':
+        default:
+            usage(argv[0]);
+            exit(EXIT_FAILURE);
+        }
+    }
+
+    /* Parameter validation */
+    if (sock == -1 || rpath == NULL) {
+        fprintf(stderr, "socket descriptor or path not specified\n");
+        usage(argv[0]);
+        exit(EXIT_FAILURE);
+    }
+
+    if (lstat(rpath, &stbuf) < 0) {
+        fprintf(stderr, "invalid path \"%s\" specified, %s\n",
+                rpath, strerror(errno));
+        exit(EXIT_FAILURE);
+    }
+
+    if (!S_ISDIR(stbuf.st_mode)) {
+        fprintf(stderr, "specified path \"%s\" is not a directory\n", rpath);
+        exit(EXIT_FAILURE);
+    }
+
+    if (is_daemon) {
+        if (daemon(0, 0) < 0) {
+            fprintf(stderr, "daemon call failed\n");
+            exit(EXIT_FAILURE);
+        }
+        openlog(PROGNAME, LOG_PID, LOG_DAEMON);
+    }
+
+    do_log(LOG_INFO, "Started\n");
+
+    if (chroot(rpath) < 0) {
+        do_perror("chroot");
+        goto error;
+    }
+    umask(0);
+
+    if (init_capabilities() < 0) {
+        goto error;
+    }
+
+    process_requests(sock);
+error:
+    do_log(LOG_INFO, "Done\n");
+    closelog();
+    return 0;
+}
diff --git a/hw/9pfs/virtio-9p-proxy.h b/hw/9pfs/virtio-9p-proxy.h
index b3c59c7..c33a84b 100644
--- a/hw/9pfs/virtio-9p-proxy.h
+++ b/hw/9pfs/virtio-9p-proxy.h
@@ -14,6 +14,15 @@ 
 
 #define PROXY_MAX_IO_SZ (64 * 1024)
 
+/*
+ * proxy iovec only supports one element and
+ * marshal/unmarshal doesn't do little-endian conversion.
+ */
+#define proxy_unmarshal(in_sg, offset, fmt, args...) \
+    v9fs_unmarshal(in_sg, 1, offset, 0, fmt, ##args)
+#define proxy_marshal(out_sg, offset, fmt, args...) \
+    v9fs_marshal(out_sg, 1, offset, 0, fmt, ##args)
+
 typedef struct {
     int type;
     int size;