From patchwork Tue Jan 24 18:04:20 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Paolo Bonzini X-Patchwork-Id: 719300 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [IPv6:2001:4830:134:3::11]) (using TLSv1 with cipher AES256-SHA (256/256 bits)) (No client certificate requested) by ozlabs.org (Postfix) with ESMTPS id 3v7Gc05c6Lz9srY for ; Wed, 25 Jan 2017 05:17:04 +1100 (AEDT) Authentication-Results: ozlabs.org; dkim=fail reason="signature verification failed" (2048-bit key; unprotected) header.d=gmail.com header.i=@gmail.com header.b="CWiGz8vl"; dkim-atps=neutral Received: from localhost ([::1]:54095 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1cW5eP-0001KA-Au for incoming@patchwork.ozlabs.org; Tue, 24 Jan 2017 13:17:01 -0500 Received: from eggs.gnu.org ([2001:4830:134:3::10]:44406) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1cW5SL-0006IP-Eq for qemu-devel@nongnu.org; Tue, 24 Jan 2017 13:04:38 -0500 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1cW5SJ-0000BG-QQ for qemu-devel@nongnu.org; Tue, 24 Jan 2017 13:04:33 -0500 Received: from mail-wm0-x242.google.com ([2a00:1450:400c:c09::242]:35400) by eggs.gnu.org with esmtps (TLS1.0:RSA_AES_128_CBC_SHA1:16) (Exim 4.71) (envelope-from ) id 1cW5SJ-0000B6-Hb for qemu-devel@nongnu.org; Tue, 24 Jan 2017 13:04:31 -0500 Received: by mail-wm0-x242.google.com with SMTP id d140so36533430wmd.2 for ; Tue, 24 Jan 2017 10:04:31 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025; h=sender:from:to:cc:subject:date:message-id:in-reply-to:references; bh=lL96XwRjzpKbUyrGD9ijAdrl0X7uuvMVl7z3HQ4MFb4=; b=CWiGz8vlhVmdCvGqkx8pETFtUPj63tBT5JpGIiwzkLpBPBSjZZMldGYRq/qYcuNWhw 
+fOf125APpwCxGvRl6qJeiNVJCeC2kJh1ba56z5s3J6IOFMhddyWjPs7J8UViaVyAsXr 1C5tyXTtW3F8tqqmYXXcfLjOqYuZq49nTCk0n7qJROg+Bx7NA2yp3i8yVzoXlGFCz/9k 4nyvJ/GFxoZYcHiv0nJGD/D9NNPq5ZEyuTxPqqatyj9frpQLwFKNxA9RE8orcPt2/8qk PIwxVf+5+gCvryMt6gyTVrhre0CC7aO9WdG9x1JY9UEbVFkg0R7Zwr27fWRGx/6eD3C0 k2dA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:sender:from:to:cc:subject:date:message-id :in-reply-to:references; bh=lL96XwRjzpKbUyrGD9ijAdrl0X7uuvMVl7z3HQ4MFb4=; b=cztcTegf1xW1iRbYURoSeo2a4hY4jbGPXM3RY7lqZb4MeiSDiqOVZtXgIH/ri+wpE1 ocLXXGBW9eB8fwZQJ300W4ZzEVGAeyIoHxImsuRtHOfQgGCrOGviwHnCdfW+UM1pXicO Wft2zthjBISEo2ZQVRr+6ltcB1LJIiLFqCVxQd6KGnJr47h7UV45juJHbRYcJ3bnQaxn nuiDQZnHJS8EKOyRYerhWDJg7UR599rPFLcvVpwgV3jH+NnjLCuug5myJZ2mZrkZ6jUN iNxVWvyktjAWETDafTQvHxCtecF1AKvAHXYAH7DYl2DyNaDaV1YNr8+2Ijx8PH4sGcXP 4jDQ== X-Gm-Message-State: AIkVDXKy1+3AVnD4Gai7TFIdWPbEenOA7m5MMsGBZ7GifmYdKXWHXSN7cANaCmZorUyeBQ== X-Received: by 10.28.146.12 with SMTP id u12mr5190938wmd.113.1485281070304; Tue, 24 Jan 2017 10:04:30 -0800 (PST) Received: from localhost.localdomain (94-39-187-56.adsl-ull.clienti.tiscali.it. 
[94.39.187.56]) by smtp.gmail.com with ESMTPSA id 18sm21059075wrb.14.2017.01.24.10.04.28 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Tue, 24 Jan 2017 10:04:29 -0800 (PST) From: Paolo Bonzini To: qemu-devel@nongnu.org Date: Tue, 24 Jan 2017 19:04:20 +0100 Message-Id: <20170124180420.12430-9-pbonzini@redhat.com> X-Mailer: git-send-email 2.9.3 In-Reply-To: <20170124180420.12430-1-pbonzini@redhat.com> References: <20170124180420.12430-1-pbonzini@redhat.com> X-detected-operating-system: by eggs.gnu.org: GNU/Linux 2.2.x-3.x [generic] X-Received-From: 2a00:1450:400c:c09::242 Subject: [Qemu-devel] [PATCH 8/8] virtio: use VRingMemoryRegionCaches for avail and used rings X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.21 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: stefanha@redhat.com, mst@redhat.com Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Sender: "Qemu-devel" The virtio-net change is necessary because it uses virtqueue_fill and virtqueue_flush instead of the more convenient virtqueue_push. Signed-off-by: Paolo Bonzini Reviewed-by: Stefan Hajnoczi --- v1->v2: improved and simplified error recovery removed duplicate rcu_read_lock! 
hw/net/virtio-net.c | 14 +++++- hw/virtio/virtio.c | 130 ++++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 107 insertions(+), 37 deletions(-) diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 7b3ad4a..6f0e397 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -1130,7 +1130,8 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size) return 0; } -static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size) +static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, + size_t size) { VirtIONet *n = qemu_get_nic_opaque(nc); VirtIONetQueue *q = virtio_net_get_subqueue(nc); @@ -1233,6 +1234,17 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t return size; } +static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + ssize_t r; + + rcu_read_lock(); + r = virtio_net_receive_rcu(nc, buf, size); + rcu_read_unlock(); + return r; +} + static int32_t virtio_net_flush_tx(VirtIONetQueue *q); static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index b664b6e..114eb5a 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -169,6 +169,7 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n) virtio_init_region_cache(vdev, n); } +/* Called within rcu_read_lock(). */ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc, MemoryRegionCache *cache, int i) { @@ -180,88 +181,110 @@ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc, virtio_tswap16s(vdev, &desc->next); } +/* Called within rcu_read_lock(). 
*/ static inline uint16_t vring_avail_flags(VirtQueue *vq) { - hwaddr pa; - pa = vq->vring.avail + offsetof(VRingAvail, flags); - return virtio_lduw_phys(vq->vdev, pa); + VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); + hwaddr pa = offsetof(VRingAvail, flags); + return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); } +/* Called within rcu_read_lock(). */ static inline uint16_t vring_avail_idx(VirtQueue *vq) { - hwaddr pa; - pa = vq->vring.avail + offsetof(VRingAvail, idx); - vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa); + VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); + hwaddr pa = offsetof(VRingAvail, idx); + vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); return vq->shadow_avail_idx; } +/* Called within rcu_read_lock(). */ static inline uint16_t vring_avail_ring(VirtQueue *vq, int i) { - hwaddr pa; - pa = vq->vring.avail + offsetof(VRingAvail, ring[i]); - return virtio_lduw_phys(vq->vdev, pa); + VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); + hwaddr pa = offsetof(VRingAvail, ring[i]); + return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); } +/* Called within rcu_read_lock(). */ static inline uint16_t vring_get_used_event(VirtQueue *vq) { return vring_avail_ring(vq, vq->vring.num); } +/* Called within rcu_read_lock(). 
*/ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem, int i) { - hwaddr pa; + VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); + hwaddr pa = offsetof(VRingUsed, ring[i]); virtio_tswap32s(vq->vdev, &uelem->id); virtio_tswap32s(vq->vdev, &uelem->len); - pa = vq->vring.used + offsetof(VRingUsed, ring[i]); - address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED, - (void *)uelem, sizeof(VRingUsedElem)); + address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem)); + address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem)); } +/* Called within rcu_read_lock(). */ static uint16_t vring_used_idx(VirtQueue *vq) { - hwaddr pa; - pa = vq->vring.used + offsetof(VRingUsed, idx); - return virtio_lduw_phys(vq->vdev, pa); + VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); + hwaddr pa = offsetof(VRingUsed, idx); + return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); } +/* Called within rcu_read_lock(). */ static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val) { - hwaddr pa; - pa = vq->vring.used + offsetof(VRingUsed, idx); - virtio_stw_phys(vq->vdev, pa, val); + VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); + hwaddr pa = offsetof(VRingUsed, idx); + virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); + address_space_cache_invalidate(&caches->used, pa, sizeof(val)); vq->used_idx = val; } +/* Called within rcu_read_lock(). 
*/ static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask) { + VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); VirtIODevice *vdev = vq->vdev; - hwaddr pa; - pa = vq->vring.used + offsetof(VRingUsed, flags); - virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask); + hwaddr pa = offsetof(VRingUsed, flags); + uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); + + virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask); + address_space_cache_invalidate(&caches->used, pa, sizeof(flags)); } +/* Called within rcu_read_lock(). */ static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask) { + VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); VirtIODevice *vdev = vq->vdev; - hwaddr pa; - pa = vq->vring.used + offsetof(VRingUsed, flags); - virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask); + hwaddr pa = offsetof(VRingUsed, flags); + uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); + + virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask); + address_space_cache_invalidate(&caches->used, pa, sizeof(flags)); } +/* Called within rcu_read_lock(). 
*/ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val) { + VRingMemoryRegionCaches *caches; hwaddr pa; if (!vq->notification) { return; } - pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]); - virtio_stw_phys(vq->vdev, pa, val); + + caches = atomic_rcu_read(&vq->vring.caches); + pa = offsetof(VRingUsed, ring[vq->vring.num]); + virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); } void virtio_queue_set_notification(VirtQueue *vq, int enable) { vq->notification = enable; + + rcu_read_lock(); if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { vring_set_avail_event(vq, vring_avail_idx(vq)); } else if (enable) { @@ -273,6 +296,7 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable) /* Expose avail event/used flags before caller checks the avail idx. */ smp_mb(); } + rcu_read_unlock(); } int virtio_queue_ready(VirtQueue *vq) @@ -281,8 +305,9 @@ int virtio_queue_ready(VirtQueue *vq) } /* Fetch avail_idx from VQ memory only when we really need to know if - * guest has added some buffers. */ -int virtio_queue_empty(VirtQueue *vq) + * guest has added some buffers. + * Called within rcu_read_lock(). */ +static int virtio_queue_empty_rcu(VirtQueue *vq) { if (vq->shadow_avail_idx != vq->last_avail_idx) { return 0; @@ -291,6 +316,20 @@ int virtio_queue_empty(VirtQueue *vq) return vring_avail_idx(vq) == vq->last_avail_idx; } +int virtio_queue_empty(VirtQueue *vq) +{ + bool empty; + + if (vq->shadow_avail_idx != vq->last_avail_idx) { + return 0; + } + + rcu_read_lock(); + empty = vring_avail_idx(vq) == vq->last_avail_idx; + rcu_read_unlock(); + return empty; +} + static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { @@ -369,6 +408,7 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num) return true; } +/* Called within rcu_read_lock(). 
*/ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len, unsigned int idx) { @@ -389,6 +429,7 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, vring_used_write(vq, &uelem, idx); } +/* Called within rcu_read_lock(). */ void virtqueue_flush(VirtQueue *vq, unsigned int count) { uint16_t old, new; @@ -412,10 +453,13 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count) void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { + rcu_read_lock(); virtqueue_fill(vq, elem, len, 0); virtqueue_flush(vq, 1); + rcu_read_unlock(); } +/* Called within rcu_read_lock(). */ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) { uint16_t num_heads = vring_avail_idx(vq) - idx; @@ -435,6 +479,7 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) return num_heads; } +/* Called within rcu_read_lock(). */ static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx, unsigned int *head) { @@ -736,8 +781,9 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) if (unlikely(vdev->broken)) { return NULL; } - if (virtio_queue_empty(vq)) { - return NULL; + rcu_read_lock(); + if (virtio_queue_empty_rcu(vq)) { + goto done; } /* Needed after virtio_queue_empty(), see comment in * virtqueue_num_heads(). */ @@ -750,11 +796,11 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) if (vq->inuse >= vq->vring.num) { virtio_error(vdev, "Virtqueue size exceeded"); - return NULL; + goto done; } if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) { - return NULL; + goto done; } if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { @@ -763,7 +809,6 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) i = head; - rcu_read_lock(); caches = atomic_rcu_read(&vq->vring.caches); if (caches->desc.len < max * sizeof(VRingDesc)) { virtio_error(vdev, "Cannot map descriptor ring"); @@ -1477,6 +1523,7 @@ static void virtio_set_isr(VirtIODevice *vdev, int value) } } +/* Called within rcu_read_lock(). 
*/ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq) { uint16_t old, new; @@ -1502,7 +1549,12 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq) void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq) { - if (!virtio_should_notify(vdev, vq)) { + bool should_notify; + rcu_read_lock(); + should_notify = virtio_should_notify(vdev, vq); + rcu_read_unlock(); + + if (!should_notify) { return; } @@ -1529,7 +1581,12 @@ void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq) void virtio_notify(VirtIODevice *vdev, VirtQueue *vq) { - if (!virtio_should_notify(vdev, vq)) { + bool should_notify; + rcu_read_lock(); + should_notify = virtio_should_notify(vdev, vq); + rcu_read_unlock(); + + if (!should_notify) { return; } @@ -1983,6 +2040,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) } } + rcu_read_lock(); for (i = 0; i < num; i++) { if (vdev->vq[i].vring.desc) { uint16_t nheads; @@ -2017,6 +2075,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) } } } + rcu_read_unlock(); return 0; }