From patchwork Fri Oct 22 18:43:26 2010
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Michael Roth <mdroth@linux.vnet.ibm.com>
X-Patchwork-Id: 68911
From: Michael Roth <mdroth@linux.vnet.ibm.com>
To: qemu-devel@nongnu.org
Cc: aliguori@linux.vnet.ibm.com, ryanh@us.ibm.com, agl@linux.vnet.ibm.com,
    mdroth@linux.vnet.ibm.com, abeekhof@redhat.com
Date: Fri, 22 Oct 2010 13:43:26 -0500
Message-Id: <1287773011-24726-11-git-send-email-mdroth@linux.vnet.ibm.com>
X-Mailer: git-send-email 1.7.0.4
In-Reply-To: <1287773011-24726-1-git-send-email-mdroth@linux.vnet.ibm.com>
References: <1287773011-24726-1-git-send-email-mdroth@linux.vnet.ibm.com>
Subject: [Qemu-devel] [RFC][PATCH 10/15] virtproxy: add handler for control packets

Process control packets coming in over the channel. This entails setting
up and tearing down connections to local services, as initiated from the
other end of the channel.

Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
---
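Note for reviewers: VPPacket and VPControlMsg are introduced earlier in
this series, so their definitions do not appear in this diff. The sketch
below reconstructs only the fields the new handler touches; the exact
enum/union layout is an assumption, not the actual declaration.

    /* Reconstructed sketch, based solely on the fields referenced by
     * vp_handle_control_packet() below. See the earlier patches in this
     * series for the real definitions (including VP_SERVICE_ID_LEN).
     */
    typedef struct VPControlMsg {
        enum {
            VP_CONTROL_CONNECT_INIT, /* request: connect me to service_id */
            VP_CONTROL_CONNECT_ACK,  /* reply: here is the paired server_fd */
            VP_CONTROL_CLOSE,        /* teardown of one connection */
        } type;
        union {
            struct {
                int  client_fd;
                char service_id[VP_SERVICE_ID_LEN];
            } connect_init;
            struct {
                int client_fd;
                int server_fd;
            } connect_ack;
            struct {
                int client_fd;
                int server_fd;
            } close;
        } args;
    } VPControlMsg;

The resulting handshake: the initiating side sends connect_init naming a
service id and its local client_fd; the receiving side connects to the
matching forwarder socket and replies with connect_ack carrying the
(client_fd, server_fd) pair; close from either side releases the
corresponding VPConn.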
 virtproxy.c |  154 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 154 insertions(+), 0 deletions(-)

diff --git a/virtproxy.c b/virtproxy.c
index 57ab2b0..4f56aba 100644
--- a/virtproxy.c
+++ b/virtproxy.c
@@ -235,6 +235,160 @@ static void vp_channel_accept(void *opaque)
     vp_set_fd_handler(drv->listen_fd, NULL, NULL, NULL);
 }
 
+/* handle control packets
+ *
+ * process VPPackets containing control messages
+ */
+static int vp_handle_control_packet(VPDriver *drv, const VPPacket *pkt)
+{
+    const VPControlMsg *msg = &pkt->payload.msg;
+    int ret;
+
+    TRACE("called with drv: %p", drv);
+
+    switch (msg->type) {
+    case VP_CONTROL_CONNECT_INIT: {
+        int client_fd = msg->args.connect_init.client_fd;
+        int server_fd;
+        char service_id[VP_SERVICE_ID_LEN];
+        VPPacket resp_pkt;
+        VPConn *new_conn;
+        VPIForward *iforward;
+
+        pstrcpy(service_id, VP_SERVICE_ID_LEN,
+                msg->args.connect_init.service_id);
+        TRACE("setting up connection for service id %s", service_id);
+
+        /* create server connection on behalf of remote end */
+        iforward = get_iforward(drv, service_id);
+        if (iforward == NULL) {
+            LOG("no forwarder configured for service id");
+            return -1;
+        }
+
+        qemu_opts_print(iforward->socket_opts, NULL);
+        if (qemu_opt_get(iforward->socket_opts, "host") != NULL) {
+            server_fd = inet_connect_opts(iforward->socket_opts);
+        } else if (qemu_opt_get(iforward->socket_opts, "path") != NULL) {
+            server_fd = unix_connect_opts(iforward->socket_opts);
+        } else {
+            LOG("unable to find listening socket host/addr info");
+            return -1;
+        }
+
+        if (server_fd == -1) {
+            LOG("failed to create connection to service with id %s",
+                service_id);
+            /* bail out rather than acking with an invalid server fd */
+            return -1;
+        }
+        TRACE("server_fd: %d", server_fd);
+
+        new_conn = qemu_mallocz(sizeof(VPConn));
+        if (!new_conn) {
+            LOG("memory allocation failed");
+            return -1;
+        }
+
+        /* send a connect_ack back over the channel */
+        /* TODO: all fields should be explicitly set so we shouldn't
+         * need to memset. this might hurt if we beef up VPPacket size
+         */
+        memset(&resp_pkt, 0, sizeof(resp_pkt));
+        resp_pkt.type = VP_PKT_CONTROL;
+        resp_pkt.payload.msg.type = VP_CONTROL_CONNECT_ACK;
+        resp_pkt.payload.msg.args.connect_ack.server_fd = server_fd;
+        resp_pkt.payload.msg.args.connect_ack.client_fd = client_fd;
+        resp_pkt.magic = VP_MAGIC;
+
+        /* TODO: can this potentially block or cause a deadlock with
+         * the remote end? need to look into potentially buffering these
+         * if it looks like the remote end is waiting for us to read data
+         * off the channel.
+         */
+        if (drv->channel_fd == -1) {
+            TRACE("channel no longer connected, ignoring packet");
+            /* don't leak the pending conn on error */
+            qemu_free(new_conn);
+            return -1;
+        }
+
+        ret = vp_send_all(drv->channel_fd, (void *)&resp_pkt,
+                          sizeof(resp_pkt));
+        if (ret == -1) {
+            LOG("error sending data over channel");
+            qemu_free(new_conn);
+            return -1;
+        }
+        if (ret != sizeof(resp_pkt)) {
+            TRACE("buffer full? %d bytes remaining", ret);
+            qemu_free(new_conn);
+            return -1;
+        }
+
+        /* add new VPConn to list and set a read handler for it */
+        new_conn->drv = drv;
+        new_conn->client_fd = client_fd;
+        new_conn->server_fd = server_fd;
+        new_conn->type = VP_CONN_SERVER;
+        new_conn->state = VP_STATE_CONNECTED;
+        QLIST_INSERT_HEAD(&drv->conns, new_conn, next);
+        vp_set_fd_handler(server_fd, vp_conn_read, NULL, new_conn);
+
+        break;
+    }
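+    /* The initiating side treats its connection as pending until the
+     * ack sent above arrives; only the VP_CONTROL_CONNECT_ACK case below
+     * marks the conn connected and starts reading from the client fd.
+     */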
%d bytes remaining", ret); + return -1; + } + + /* add new VPConn to list and set a read handler for it */ + new_conn->drv = drv; + new_conn->client_fd = client_fd; + new_conn->server_fd = server_fd; + new_conn->type = VP_CONN_SERVER; + new_conn->state = VP_STATE_CONNECTED; + QLIST_INSERT_HEAD(&drv->conns, new_conn, next); + vp_set_fd_handler(server_fd, vp_conn_read, NULL, new_conn); + + break; + } + case VP_CONTROL_CONNECT_ACK: { + int client_fd = msg->args.connect_ack.client_fd; + int server_fd = msg->args.connect_ack.server_fd; + VPConn *conn; + + TRACE("recieved ack from remote end for client fd %d", client_fd); + + if (server_fd <= 0) { + LOG("remote end sent invalid server fd"); + return -1; + } + + conn = get_conn(drv, client_fd, true); + + if (conn == NULL) { + LOG("failed to find connection with client_fd %d", client_fd); + return -1; + } + + conn->server_fd = server_fd; + conn->state = VP_STATE_CONNECTED; + vp_set_fd_handler(client_fd, vp_conn_read, NULL, conn); + + break; + } + case VP_CONTROL_CLOSE: { + int fd; + VPConn *conn; + + TRACE("closing connection on behalf of remote end"); + + if (msg->args.close.client_fd >= 0) { + fd = msg->args.close.client_fd; + TRACE("recieved close msg from remote end for client fd %d", fd); + conn = get_conn(drv, fd, true); + } else if (msg->args.close.server_fd >= 0) { + fd = msg->args.close.server_fd; + TRACE("recieved close msg from remote end for server fd %d", fd); + conn = get_conn(drv, fd, false); + } else { + LOG("invalid fd"); + return -1; + } + + if (conn == NULL) { + LOG("failed to find conn with specified fd %d", fd); + return -1; + } + + closesocket(fd); + vp_set_fd_handler(fd, NULL, NULL, conn); + QLIST_REMOVE(conn, next); + qemu_free(conn); + break; + } + } + return 0; +} + /* handle data packets * * process VPPackets containing data and send them to the corresponding