From patchwork Thu Feb 4 12:47:37 2010
X-Patchwork-Submitter: "Michael S. Tsirkin"
X-Patchwork-Id: 44481
Date: Thu, 4 Feb 2010 14:47:37 +0200
From: "Michael S. Tsirkin"
To: Anthony Liguori, qemu-devel@nongnu.org
Message-ID: <20100204124737.GP22559@redhat.com>
Subject: [Qemu-devel] [PATCH 15/15] virtio-net: vhost net support

This connects virtio-net to the vhost net backend. The code is structured
analogously to what we have for the vnet header capability in tap. We
start/stop the backend on driver start/stop, as well as on save and on VM
start (for migration).

Signed-off-by: Michael S. Tsirkin
---
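
Note for reviewers (not part of the commit message): the new set_status hook
relies on the callback added to VirtIODevice earlier in this series. Below is
a rough sketch of how the core is expected to drive it; the function name and
exact wiring are illustrative assumptions, not code from this patch.

/* Illustration only: the virtio core records the status written by the guest
 * and then lets the device react to the change.  For virtio-net the callback
 * is virtio_net_set_status(), which starts or stops the vhost backend. */
static void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    vdev->status = val;          /* record the status written by the guest */
    if (vdev->set_status) {      /* device-specific hook, if registered    */
        vdev->set_status(vdev);  /* e.g. start/stop vhost for virtio-net   */
    }
}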
 hw/virtio-net.c |   67 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 65 insertions(+), 2 deletions(-)

diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 6e48997..f32c6fa 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -17,6 +17,7 @@
 #include "net/tap.h"
 #include "qemu-timer.h"
 #include "virtio-net.h"
+#include "vhost_net.h"
 
 #define VIRTIO_NET_VM_VERSION    11
 
@@ -47,6 +48,8 @@ typedef struct VirtIONet
     uint8_t nomulti;
     uint8_t nouni;
     uint8_t nobcast;
+    uint8_t vhost_started;
+    VMChangeStateEntry *vmstate;
     struct {
         int in_use;
         int first_multi;
@@ -114,6 +117,10 @@ static void virtio_net_reset(VirtIODevice *vdev)
     n->nomulti = 0;
     n->nouni = 0;
     n->nobcast = 0;
+    if (n->vhost_started) {
+        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
+        n->vhost_started = 0;
+    }
 
     /* Flush any MAC and VLAN filter table state */
     n->mac_table.in_use = 0;
@@ -172,7 +179,10 @@ static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
         features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
     }
 
-    return features;
+    if (!tap_get_vhost_net(n->nic->nc.peer)) {
+        return features;
+    }
+    return vhost_net_get_features(tap_get_vhost_net(n->nic->nc.peer), features);
 }
 
 static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
@@ -690,6 +700,12 @@ static void virtio_net_save(QEMUFile *f, void *opaque)
 {
     VirtIONet *n = opaque;
 
+    if (n->vhost_started) {
+        /* TODO: should we really stop the backend?
+         * If we don't, it might keep writing to memory. */
+        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
+        n->vhost_started = 0;
+    }
     virtio_save(&n->vdev, f);
 
     qemu_put_buffer(f, n->mac, ETH_ALEN);
@@ -802,7 +818,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
         qemu_mod_timer(n->tx_timer,
                        qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
     }
-
     return 0;
 }
 
@@ -822,6 +837,47 @@ static NetClientInfo net_virtio_info = {
     .link_status_changed = virtio_net_set_link_status,
 };
 
+static void virtio_net_set_status(struct VirtIODevice *vdev)
+{
+    VirtIONet *n = to_virtio_net(vdev);
+    if (!n->nic->nc.peer) {
+        return;
+    }
+    if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
+        return;
+    }
+
+    if (!tap_get_vhost_net(n->nic->nc.peer)) {
+        return;
+    }
+    if (!!n->vhost_started == !!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+        return;
+    }
+    if (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) {
+        int r = vhost_net_start(tap_get_vhost_net(n->nic->nc.peer), vdev);
+        if (r < 0) {
+            fprintf(stderr, "unable to start vhost net: %d: "
+                    "falling back on userspace virtio\n", -r);
+        } else {
+            n->vhost_started = 1;
+        }
+    } else {
+        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
+        n->vhost_started = 0;
+    }
+}
+
+static void virtio_net_vmstate_change(void *opaque, int running, int reason)
+{
+    VirtIONet *n = opaque;
+    if (!running) {
+        return;
+    }
+    /* This is called when the vm is started; it will start the vhost backend
+     * if appropriate, e.g. after migration. */
+    virtio_net_set_status(&n->vdev);
+}
+
 VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
 {
     VirtIONet *n;
@@ -837,6 +893,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
     n->vdev.set_features = virtio_net_set_features;
     n->vdev.bad_features = virtio_net_bad_features;
     n->vdev.reset = virtio_net_reset;
+    n->vdev.set_status = virtio_net_set_status;
     n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
     n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
     n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
@@ -859,6 +916,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
 
     register_savevm("virtio-net", virtio_net_id++, VIRTIO_NET_VM_VERSION,
                     virtio_net_save, virtio_net_load, n);
+    n->vmstate = qemu_add_vm_change_state_handler(virtio_net_vmstate_change, n);
 
     return &n->vdev;
 }
@@ -866,6 +924,11 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
 void virtio_net_exit(VirtIODevice *vdev)
 {
     VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
+    qemu_del_vm_change_state_handler(n->vmstate);
+
+    if (n->vhost_started) {
+        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
+    }
 
     qemu_purge_queued_packets(&n->nic->nc);