From patchwork Tue May  7 05:46:57 2013
Subject: [Qemu-devel] [PATCH v1 09/14] net: introduce lock to protect
 NetClientState's peer's access
X-Patchwork-Submitter: pingfan liu
X-Patchwork-Id: 241982
From: Liu Ping Fan
To: qemu-devel@nongnu.org
Cc: mdroth, Jan Kiszka, Stefan Hajnoczi, Anthony Liguori, Paolo Bonzini
Date: Tue, 7 May 2013 13:46:57 +0800
Message-Id: <1367905622-21038-10-git-send-email-qemulist@gmail.com>
In-Reply-To: <1367905622-21038-1-git-send-email-qemulist@gmail.com>
References: <1367905622-21038-1-git-send-email-qemulist@gmail.com>
List-Id: qemu-devel@nongnu.org

From: Liu Ping Fan

Introduce nc->peer_lock to shield against the race between readers and the
deleter of nc->peer. With it, once the deleter has finished, no new
qemu_send_packet_xx() call can append a packet to peer->send_queue, and
therefore no new reference from packet->sender to nc can show up in
nc->peer->send_queue.
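
To make the locking rule concrete before the diff: a reader must only
dereference nc->peer while holding nc->peer_lock. The helper below is an
illustrative sketch of that pattern, not part of this patch; the function
name is made up, while the fields and the queue call mirror what the patch
does in qemu_send_packet_async_with_flags():

#include "net/net.h"
#include "net/queue.h"

/* Sketch only: the reader-side pattern enforced by peer_lock.  While the
 * lock is held, the deleter cannot clear sender->peer or purge the peer's
 * queue, so enqueueing the packet cannot create a dangling reference.
 */
static ssize_t send_to_peer_sketch(NetClientState *sender,
                                   const uint8_t *buf, size_t size)
{
    ssize_t ret = size;    /* a dropped packet is reported as sent */

    qemu_mutex_lock(&sender->peer_lock);
    if (sender->peer && !sender->link_down) {
        ret = qemu_net_queue_send(sender->peer->send_queue, sender,
                                  QEMU_NET_PACKET_FLAG_NONE,
                                  buf, size, NULL);
    }
    qemu_mutex_unlock(&sender->peer_lock);
    return ret;
}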
Signed-off-by: Liu Ping Fan
---
 include/net/net.h |  7 +++++
 net/net.c         | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++---
 net/queue.c       |  4 +-
 3 files changed, 84 insertions(+), 6 deletions(-)

diff --git a/include/net/net.h b/include/net/net.h
index 88332d2..54f91ea 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -5,6 +5,7 @@
 #include "qemu-common.h"
 #include "qapi/qmp/qdict.h"
 #include "qemu/option.h"
+#include "qemu/thread.h"
 #include "net/queue.h"
 #include "migration/vmstate.h"
 #include "qapi-types.h"
@@ -63,6 +64,10 @@ struct NetClientState {
     NetClientInfo *info;
     int link_down;
     QTAILQ_ENTRY(NetClientState) next;
+    /* Protects racy access to peer, but only between a reader and the
+     * writer; races among writers (deleters) are resolved by the big lock.
+     */
+    QemuMutex peer_lock;
     NetClientState *peer;
     NetQueue *send_queue;
     char *model;
@@ -75,6 +80,7 @@ struct NetClientState {

 typedef struct NICState {
     NetClientState *ncs;
+    NetClientState **pending_peer;
     NICConf *conf;
     void *opaque;
     bool peer_deleted;
@@ -102,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
                                               const char *client_str);
 typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
 void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
+int qemu_can_send_packet_nolock(NetClientState *sender);
 int qemu_can_send_packet(NetClientState *nc);
 ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
                           int iovcnt);
diff --git a/net/net.c b/net/net.c
index f3d67f8..7619762 100644
--- a/net/net.c
+++ b/net/net.c
@@ -207,6 +207,7 @@ static void qemu_net_client_setup(NetClientState *nc,
         nc->peer = peer;
         peer->peer = nc;
     }
+    qemu_mutex_init(&nc->peer_lock);
     QTAILQ_INSERT_TAIL(&net_clients, nc, next);

     nc->send_queue = qemu_new_net_queue(nc);
@@ -246,6 +247,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
     nic->ncs = (void *)nic + info->size;
     nic->conf = conf;
     nic->opaque = opaque;
+    nic->pending_peer = g_malloc0(sizeof(NetClientState *) * queues);

     for (i = 0; i < queues; i++) {
         qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
@@ -304,6 +306,38 @@ static void qemu_free_net_client(NetClientState *nc)
     }
 }

+/* Eliminate the peer reference, synchronize with the exit of rx/tx actions,
+ * and flush out the peer's queue.
+ */
+static void qemu_net_client_detach_flush(NetClientState *nc)
+{
+    NetClientState *peer;
+
+    /* Reader of our own peer field.  FIXME: deleters do not run
+     * concurrently, so this lock/unlock pair could be dropped.
+     */
+    qemu_mutex_lock(&nc->peer_lock);
+    peer = nc->peer;
+    qemu_mutex_unlock(&nc->peer_lock);
+
+    /* Writer of the peer's peer field */
+    if (peer) {
+        /* exclude the race with tx to @nc */
+        qemu_mutex_lock(&peer->peer_lock);
+        peer->peer = NULL;
+        qemu_mutex_unlock(&peer->peer_lock);
+    }
+
+    /* Writer of our own peer field */
+    /* exclude the race with tx from @nc */
+    qemu_mutex_lock(&nc->peer_lock);
+    nc->peer = NULL;
+    if (peer) {
+        qemu_net_queue_purge(peer->send_queue, nc);
+    }
+    qemu_mutex_unlock(&nc->peer_lock);
+}
+
 void qemu_del_net_client(NetClientState *nc)
 {
     NetClientState *ncs[MAX_QUEUE_NUM];
@@ -334,7 +368,9 @@ void qemu_del_net_client(NetClientState *nc)
         }

         for (i = 0; i < queues; i++) {
+            qemu_net_client_detach_flush(ncs[i]);
             qemu_cleanup_net_client(ncs[i]);
+            nic->pending_peer[i] = ncs[i];
         }

         return;
@@ -343,6 +379,7 @@ void qemu_del_net_client(NetClientState *nc)
     assert(nc->info->type != NET_CLIENT_OPTIONS_KIND_NIC);

     for (i = 0; i < queues; i++) {
+        qemu_net_client_detach_flush(ncs[i]);
         qemu_cleanup_net_client(ncs[i]);
         qemu_free_net_client(ncs[i]);
     }
@@ -355,17 +392,19 @@ void qemu_del_nic(NICState *nic)
     /* If this is a peer NIC and peer has already been deleted, free it now. */
     if (nic->peer_deleted) {
         for (i = 0; i < queues; i++) {
-            qemu_free_net_client(qemu_get_subqueue(nic, i)->peer);
+            qemu_free_net_client(nic->pending_peer[i]);
         }
     }

     for (i = queues - 1; i >= 0; i--) {
         NetClientState *nc = qemu_get_subqueue(nic, i);

+        qemu_net_client_detach_flush(nc);
         qemu_cleanup_net_client(nc);
         qemu_free_net_client(nc);
     }

+    g_free(nic->pending_peer);
     g_free(nic);
 }

@@ -382,7 +421,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
     }
 }

-int qemu_can_send_packet(NetClientState *sender)
+int qemu_can_send_packet_nolock(NetClientState *sender)
 {
     if (!sender->peer) {
         return 1;
@@ -397,6 +436,28 @@ int qemu_can_send_packet(NetClientState *sender)
     return 1;
 }

+int qemu_can_send_packet(NetClientState *sender)
+{
+    int ret = 1;
+
+    qemu_mutex_lock(&sender->peer_lock);
+    if (!sender->peer) {
+        goto unlock;
+    }
+
+    if (sender->peer->receive_disabled) {
+        ret = 0;
+        goto unlock;
+    } else if (sender->peer->info->can_receive &&
+               !sender->peer->info->can_receive(sender->peer)) {
+        ret = 0;
+        goto unlock;
+    }
+unlock:
+    qemu_mutex_unlock(&sender->peer_lock);
+    return ret;
+}
+
 ssize_t qemu_deliver_packet(NetClientState *sender,
                             unsigned flags,
                             const uint8_t *data,
@@ -460,19 +521,24 @@ static ssize_t qemu_send_packet_async_with_flags(NetClientState *sender,
                                                  NetPacketSent *sent_cb)
 {
     NetQueue *queue;
+    ssize_t sz;

 #ifdef DEBUG_NET
     printf("qemu_send_packet_async:\n");
     hex_dump(stdout, buf, size);
 #endif

+    qemu_mutex_lock(&sender->peer_lock);
     if (sender->link_down || !sender->peer) {
+        qemu_mutex_unlock(&sender->peer_lock);
         return size;
     }

     queue = sender->peer->send_queue;

-    return qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+    sz = qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+    qemu_mutex_unlock(&sender->peer_lock);
+    return sz;
 }

 ssize_t qemu_send_packet_async(NetClientState *sender,
@@ -540,16 +606,21 @@ ssize_t qemu_sendv_packet_async(NetClientState *sender,
                                 NetPacketSent *sent_cb)
 {
     NetQueue *queue;
+    ssize_t sz;

+    qemu_mutex_lock(&sender->peer_lock);
     if (sender->link_down || !sender->peer) {
+        qemu_mutex_unlock(&sender->peer_lock);
         return iov_size(iov, iovcnt);
     }

     queue = sender->peer->send_queue;

-    return qemu_net_queue_send_iov(queue, sender,
+    sz = qemu_net_queue_send_iov(queue, sender,
                                    QEMU_NET_PACKET_FLAG_NONE,
                                    iov, iovcnt, sent_cb);
+    qemu_mutex_unlock(&sender->peer_lock);
+    return sz;
 }

 ssize_t
diff --git a/net/queue.c b/net/queue.c
index 2856c1d..123c338 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -190,7 +190,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
 {
     ssize_t ret;

-    if (queue->delivering || !qemu_can_send_packet(sender)) {
+    if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
         qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
         return 0;
     }
@@ -215,7 +215,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
 {
     ssize_t ret;

-    if (queue->delivering || !qemu_can_send_packet(sender)) {
+    if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
         qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
         return 0;
     }
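
A note on the net/queue.c hunks above: qemu_net_queue_send() and
qemu_net_queue_send_iov() are reached from the send paths while
sender->peer_lock is already held, and QemuMutex is not recursive, so they
switch to the new qemu_can_send_packet_nolock(). The locking wrapper
qemu_can_send_packet() remains for callers outside that path. A rough usage
sketch follows; the device-side function below is hypothetical and not part
of this patch:

#include "net/net.h"

/* Hypothetical caller: a backend polling its peer before transmitting.
 * qemu_can_send_packet() samples sender->peer under peer_lock, so the
 * check is safe against a concurrent deletion of the peer.
 */
static void try_transmit_sketch(NetClientState *nc,
                                const uint8_t *frame, int len)
{
    if (!qemu_can_send_packet(nc)) {
        /* Peer is busy or already detached; the caller would retry later,
         * e.g. from its flush/sent_cb notification.
         */
        return;
    }
    qemu_send_packet(nc, frame, len);
}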