From patchwork Thu Feb 25 18:28:44 2010
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Michael S. Tsirkin"
X-Patchwork-Id: 46269
Date: Thu, 25 Feb 2010 20:28:44 +0200
From: "Michael S. Tsirkin"
To: Anthony Liguori, qemu-devel@nongnu.org
Cc: amit.shah@redhat.com, kraxel@redhat.com, quintela@redhat.com
Subject: [Qemu-devel] [PATCHv2 12/12] virtio-net: vhost net support
List-Id: qemu-devel.nongnu.org

This connects virtio-net to the vhost net backend. The code is structured
analogously to the vnet header capability in tap: we start/stop the backend
on driver start/stop, as well as on save and on vm start (for migration).

Signed-off-by: Michael S. Tsirkin
---
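
(Not part of the patch itself: below is a minimal, self-contained sketch of the
status-driven start/stop toggle described above and implemented by
virtio_net_set_status() in the diff. Only VIRTIO_CONFIG_S_DRIVER_OK reflects
the virtio spec; the toy_* names are illustrative stand-ins, not QEMU APIs.)

/*
 * Illustrative sketch, not QEMU code: models how the backend is started
 * exactly once when the guest sets DRIVER_OK and stopped exactly once when
 * it is cleared; repeated status writes with the same DRIVER_OK bit are
 * no-ops.
 */
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_CONFIG_S_DRIVER_OK 4    /* value per the virtio spec */

struct toy_net {
    uint8_t vhost_started;             /* mirrors VirtIONet::vhost_started */
};

static void toy_set_status(struct toy_net *n, uint8_t status)
{
    /* Backend state already matches the DRIVER_OK bit: nothing to do. */
    if (!!n->vhost_started == !!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }
    if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
        printf("start vhost backend\n");   /* stands in for vhost_net_start() */
        n->vhost_started = 1;
    } else {
        printf("stop vhost backend\n");    /* stands in for vhost_net_stop() */
        n->vhost_started = 0;
    }
}

int main(void)
{
    struct toy_net n = { .vhost_started = 0 };

    toy_set_status(&n, VIRTIO_CONFIG_S_DRIVER_OK);  /* driver up: starts  */
    toy_set_status(&n, VIRTIO_CONFIG_S_DRIVER_OK);  /* same status: no-op */
    toy_set_status(&n, 0);                          /* driver down: stops */
    toy_set_status(&n, 0);                          /* same status: no-op */
    return 0;
}

The same double-negation check keeps the real set_status callback idempotent,
which is why the vm change state handler can simply re-invoke it with the
current device status after migration.
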
 hw/virtio-net.c |   71 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 69 insertions(+), 2 deletions(-)

diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 5c0093e..9ddd58c 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -17,6 +17,7 @@
 #include "net/tap.h"
 #include "qemu-timer.h"
 #include "virtio-net.h"
+#include "vhost_net.h"
 
 #define VIRTIO_NET_VM_VERSION 11
 
@@ -47,6 +48,8 @@ typedef struct VirtIONet
     uint8_t nomulti;
     uint8_t nouni;
     uint8_t nobcast;
+    uint8_t vhost_started;
+    VMChangeStateEntry *vmstate;
     struct {
         int in_use;
         int first_multi;
@@ -114,6 +117,10 @@ static void virtio_net_reset(VirtIODevice *vdev)
     n->nomulti = 0;
     n->nouni = 0;
     n->nobcast = 0;
+    if (n->vhost_started) {
+        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
+        n->vhost_started = 0;
+    }
 
     /* Flush any MAC and VLAN filter table state */
     n->mac_table.in_use = 0;
@@ -172,7 +179,14 @@ static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
         features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
     }
 
-    return features;
+    if (!n->nic->nc.peer ||
+        n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
+        return features;
+    }
+    if (!tap_get_vhost_net(n->nic->nc.peer)) {
+        return features;
+    }
+    return vhost_net_get_features(tap_get_vhost_net(n->nic->nc.peer), features);
 }
 
 static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
@@ -698,6 +712,12 @@ static void virtio_net_save(QEMUFile *f, void *opaque)
 {
     VirtIONet *n = opaque;
 
+    if (n->vhost_started) {
+        /* TODO: should we really stop the backend?
+         * If we don't, it might keep writing to memory. */
+        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
+        n->vhost_started = 0;
+    }
     virtio_save(&n->vdev, f);
 
     qemu_put_buffer(f, n->mac, ETH_ALEN);
@@ -810,7 +830,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
         qemu_mod_timer(n->tx_timer,
                        qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
     }
-
     return 0;
 }
 
@@ -830,6 +849,47 @@ static NetClientInfo net_virtio_info = {
     .link_status_changed = virtio_net_set_link_status,
 };
 
+static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+{
+    VirtIONet *n = to_virtio_net(vdev);
+    if (!n->nic->nc.peer) {
+        return;
+    }
+    if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
+        return;
+    }
+
+    if (!tap_get_vhost_net(n->nic->nc.peer)) {
+        return;
+    }
+    if (!!n->vhost_started == !!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+        return;
+    }
+    if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+        int r = vhost_net_start(tap_get_vhost_net(n->nic->nc.peer), vdev);
+        if (r < 0) {
+            fprintf(stderr, "unable to start vhost net: %d: "
+                    "falling back on userspace virtio\n", -r);
+        } else {
+            n->vhost_started = 1;
+        }
+    } else {
+        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
+        n->vhost_started = 0;
+    }
+}
+
+static void virtio_net_vmstate_change(void *opaque, int running, int reason)
+{
+    VirtIONet *n = opaque;
+    if (!running) {
+        return;
+    }
+    /* This is called when vm is started, it will start vhost backend if
+     * appropriate e.g. after migration. */
+    virtio_net_set_status(&n->vdev, n->vdev.status);
+}
+
 VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
 {
     VirtIONet *n;
@@ -845,6 +905,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
     n->vdev.set_features = virtio_net_set_features;
     n->vdev.bad_features = virtio_net_bad_features;
     n->vdev.reset = virtio_net_reset;
+    n->vdev.set_status = virtio_net_set_status;
     n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
     n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
     n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
@@ -867,6 +928,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
 
     register_savevm("virtio-net", virtio_net_id++, VIRTIO_NET_VM_VERSION,
                     virtio_net_save, virtio_net_load, n);
+    n->vmstate = qemu_add_vm_change_state_handler(virtio_net_vmstate_change, n);
 
     return &n->vdev;
 }
@@ -874,6 +936,11 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
 void virtio_net_exit(VirtIODevice *vdev)
 {
     VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
+    qemu_del_vm_change_state_handler(n->vmstate);
+
+    if (n->vhost_started) {
+        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
+    }
 
     qemu_purge_queued_packets(&n->nic->nc);