Patchwork [V4,3/8] Add client side interfaces for chroot environment

login
register
mail settings
Submitter Mohan Kumar M
Date Feb. 1, 2011, 5:26 a.m.
Message ID <1296537992-16687-1-git-send-email-mohan@in.ibm.com>
Download mbox | patch
Permalink /patch/81275/
State New
Headers show

Comments

Mohan Kumar M - Feb. 1, 2011, 5:26 a.m.
Define QEMU side interfaces used for chroot environment.

Signed-off-by: M. Mohan Kumar <mohan@in.ibm.com>
---
 hw/9pfs/virtio-9p-chroot.c |   87 ++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 87 insertions(+), 0 deletions(-)
Stefan Hajnoczi - Feb. 2, 2011, 9:54 a.m.
On Tue, Feb 1, 2011 at 5:26 AM, M. Mohan Kumar <mohan@in.ibm.com> wrote:
> +/* Receive file descriptor and error status from chroot process */
> +static int v9fs_receivefd(int sockfd, int *error)

The return value and int *error overlap in functionality.  Would it be
possible to have only one mechanism for returning errors?

*error = 0 is never done so a caller that passes an uninitialized
local variable gets back junk when the function succeeds.  It would be
safer to clear it at the start of this function.

Inconsistent use of errno constants and -1:
return -EIO;
return -1; /* == -EPERM, probably not what you wanted */

How about getting rid of int *error and returning the -errno?  If
if_error is set then return -fd_info.error.

> +/*
> + * V9fsFileObjectRequest is written into the socket by QEMU process.
> + * Then this request is read by chroot process using read_request function
> + */
> +static int v9fs_write_request(int sockfd, V9fsFileObjectRequest *request)
> +{
> +    int retval, length;
> +    char *buff, *buffp;
> +
> +    length = sizeof(request->data) + request->data.path_len +
> +                    request->data.oldpath_len;
> +
> +    buff = qemu_malloc(length);
> +    buffp = buff;
> +    memcpy(buffp, &request->data, sizeof(request->data));
> +    buffp += sizeof(request->data);
> +    memcpy(buffp, request->path.path, request->data.path_len);
> +    buffp += request->data.path_len;
> +    memcpy(buffp, request->path.old_path, request->data.oldpath_len);
> +
> +    retval = qemu_write_full(sockfd, buff, length);

qemu_free(buff);

Also, weren't you doing the malloc() + single write() to avoid
interleaved write()?  Is that still necessary, I thought a mutex was
introduced?  It's probably worth adding a comment to explain why
you're doing the malloc + write.

Stefan
Mohan Kumar M - Feb. 8, 2011, 4:21 p.m.
On Wednesday 02 February 2011 3:24:16 pm Stefan Hajnoczi wrote:
> On Tue, Feb 1, 2011 at 5:26 AM, M. Mohan Kumar <mohan@in.ibm.com> wrote:
> > +/* Receive file descriptor and error status from chroot process */
> > +static int v9fs_receivefd(int sockfd, int *error)
> 
> The return value and int *error overlap in functionality.  Would it be
> possible to have only one mechanism for returning errors?
> 

v9fs_receivefd function returns 'fd' on success, returns -EIO (and sets error to
EIO) if there was a socket error, and -1 on other errors.

We need to return -EIO and error as EIO to differentiate between generic EIO 
error and socket failure.
 
> *error = 0 is never done so a caller that passes an uninitialized
> local variable gets back junk when the function succeeds.  It would be
> safer to clear it at the start of this function.
I will update the patch.
> 
> Inconsistent use of errno constants and -1:
> return -EIO;
> return -1; /* == -EPERM, probably not what you wanted */

-1 denotes failure

> 
> How about getting rid of int *error and returning the -errno?  If
> if_error is set then return -fd_info.error.

Do you mean return fd on success and -errno on failure? In this case how to 
differentiate between actual EIO and EIO because of socket read/write failure?

> 
> > +/*
> > + * V9fsFileObjectRequest is written into the socket by QEMU process.
> > + * Then this request is read by chroot process using read_request
> > function + */
> > +static int v9fs_write_request(int sockfd, V9fsFileObjectRequest
> > *request) +{
> > +    int retval, length;
> > +    char *buff, *buffp;
> > +
> > +    length = sizeof(request->data) + request->data.path_len +
> > +                    request->data.oldpath_len;
> > +
> > +    buff = qemu_malloc(length);
> > +    buffp = buff;
> > +    memcpy(buffp, &request->data, sizeof(request->data));
> > +    buffp += sizeof(request->data);
> > +    memcpy(buffp, request->path.path, request->data.path_len);
> > +    buffp += request->data.path_len;
> > +    memcpy(buffp, request->path.old_path, request->data.oldpath_len);
> > +
> > +    retval = qemu_write_full(sockfd, buff, length);
> 
> qemu_free(buff);
> 
> Also, weren't you doing the malloc() + single write() to avoid
> interleaved write()?  Is that still necessary, I thought a mutex was
> introduced?  It's probably worth adding a comment to explain why
> you're doing the malloc + write.

This is used to avoid multiple write system calls. Probably I can update with 
comments. 

----
M. Mohan Kumar

Patch

diff --git a/hw/9pfs/virtio-9p-chroot.c b/hw/9pfs/virtio-9p-chroot.c
index 5150ff0..b466d9a 100644
--- a/hw/9pfs/virtio-9p-chroot.c
+++ b/hw/9pfs/virtio-9p-chroot.c
@@ -111,6 +111,86 @@  static int chroot_read_request(int sockfd, V9fsFileObjectRequest *request)
     return 0;
 }
 
+/*
+ * Receive a file descriptor and error status from the chroot process.
+ *
+ * Returns the received fd on success, 0 if the reply carried no fd,
+ * -EIO with *error set to EIO on a socket failure, and -1 on any other
+ * error with *error set to the errno reported by the chroot process.
+ * *error is cleared on entry so a caller passing an uninitialized
+ * variable still reads a defined value after success.
+ */
+static int v9fs_receivefd(int sockfd, int *error)
+{
+    struct msghdr msg;
+    struct iovec iov;
+    union MsgControl msg_control;
+    struct cmsghdr *cmsg;
+    int retval, fd;
+    FdInfo fd_info;
+
+    *error = 0;
+
+    iov.iov_base = &fd_info;
+    iov.iov_len = sizeof(fd_info);
+
+    memset(&msg, 0, sizeof(msg));
+    msg.msg_iov = &iov;
+    msg.msg_iovlen = 1;
+    msg.msg_control = &msg_control;
+    msg.msg_controllen = sizeof(msg_control);
+
+    /*
+     * A short (or zero) read would leave fd_info partly uninitialized,
+     * so anything other than a complete FdInfo is a socket failure.
+     */
+    retval = recvmsg(sockfd, &msg, 0);
+    if (retval != sizeof(fd_info)) {
+        *error = EIO;
+        return -EIO;
+    }
+
+    /* The chroot process itself hit a socket error */
+    if (fd_info.fi_flags & FI_SOCKERR) {
+        *error = EIO;
+        return -EIO;
+    }
+
+    /* If error is set, ancillary data is not present */
+    if (fd_info.fi_error) {
+        *error = fd_info.fi_error;
+        return -1;
+    }
+
+    /* Request succeeded but no fd accompanies this reply */
+    if (!(fd_info.fi_flags & FI_FDVALID)) {
+        return 0;
+    }
+
+    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+        if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)) ||
+                cmsg->cmsg_level != SOL_SOCKET ||
+                cmsg->cmsg_type != SCM_RIGHTS) {
+            continue;
+        }
+        fd = *((int *)CMSG_DATA(cmsg));
+        return fd;
+    }
+
+    /* FI_FDVALID was set but no SCM_RIGHTS control message arrived */
+    *error = EAGAIN;
+    return -1;
+}
+
+/*
+ * V9fsFileObjectRequest is written into the socket by the QEMU process.
+ * Then this request is read by the chroot process using the
+ * read_request function.
+ *
+ * Returns 0 on success and EIO if the full request could not be
+ * written to the socket.
+ */
+static int v9fs_write_request(int sockfd, V9fsFileObjectRequest *request)
+{
+    int retval, length;
+    char *buff, *buffp;
+
+    length = sizeof(request->data) + request->data.path_len +
+                    request->data.oldpath_len;
+
+    /*
+     * Marshal the fixed-size header and both path strings into a
+     * single buffer so the whole request goes out in one write,
+     * instead of issuing multiple write system calls per request.
+     */
+    buff = qemu_malloc(length);
+    buffp = buff;
+    memcpy(buffp, &request->data, sizeof(request->data));
+    buffp += sizeof(request->data);
+    memcpy(buffp, request->path.path, request->data.path_len);
+    buffp += request->data.path_len;
+    memcpy(buffp, request->path.old_path, request->data.oldpath_len);
+
+    retval = qemu_write_full(sockfd, buff, length);
+    qemu_free(buff);    /* was leaked previously */
+    if (retval != length) {
+        return EIO;
+    }
+    return 0;
+}
+
 static int chroot_daemonize(int chroot_sock)
 {
     sigset_t sigset;
@@ -139,6 +219,12 @@  static int chroot_daemonize(int chroot_sock)
     return 0;
 }
 
+/*
+ * Reference the not-yet-called static helpers above so the compiler
+ * does not warn about unused functions; drop once real callers exist.
+ */
+static void chroot_dummy(void)
+{
+    (void)v9fs_receivefd;
+    (void)v9fs_write_request;
+}
+
 /*
  * Fork a process and chroot into the share path. Communication
  * between qemu process and chroot process happens via socket
@@ -184,6 +270,7 @@  int v9fs_chroot(FsContext *fs_ctx)
         error = qemu_write_full(chroot_sock, &code, sizeof(code));
         _exit(1);
     }
+    chroot_dummy();
 
     /*
      * Write 0 to chroot socket to indicate chroot process creation is