[06/37] net: reduce the unnecessary memory allocation of multiqueue

Submitter Michael Roth
Date April 2, 2013, 9:45 p.m.
Message ID <1364939142-30066-7-git-send-email-mdroth@linux.vnet.ibm.com>
Permalink /patch/233153/
State New

Comments

Michael Roth - April 2, 2013, 9:45 p.m.
From: Jason Wang <jasowang@redhat.com>

Edivaldo reports that the array of NetClientState in NICState is too large -
MAX_QUEUE_NUM (1024) entries - and wastes memory even when multiqueue is not
used.

Instead of using static arrays, solve this by allocating the queues on demand,
both for the NetClientState array in NICState and for the VirtIONetQueue array
in VirtIONet.

Tested by myself with a single virtio-net-pci device; memory allocation is
almost the same as before multiqueue was merged.

Cc: Edivaldo de Araujo Pereira <edivaldoapereira@yahoo.com.br>
Cc: qemu-stable@nongnu.org
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit f6b26cf257232e5854c0e5c98a8685c625bf986e)

Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
---
 hw/virtio-net.c   |    6 ++++--
 include/net/net.h |    2 +-
 net/net.c         |   19 +++++++++----------
 3 files changed, 14 insertions(+), 13 deletions(-)
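
For readers skimming the diff below, here is a minimal, self-contained sketch
(not QEMU code; the names NicState, QueueState, nic_new and nic_from_queue are
hypothetical stand-ins) of the co-allocation trick the patch applies in
qemu_new_nic() and qemu_get_nic(): the device state and its per-queue array are
carved out of a single allocation sized on demand, and the owning state is
recovered from any queue entry with pointer arithmetic.

/*
 * Minimal sketch, assuming simplified stand-in structs: one block holds
 * the device state followed by a per-queue array sized on demand, and the
 * owner is recovered from any queue entry by pointer arithmetic.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct QueueState {     /* stands in for NetClientState */
    int queue_index;
    size_t owner_size;          /* stands in for nc->info->size */
} QueueState;

typedef struct NicState {       /* stands in for NICState */
    QueueState *queues;         /* was: QueueState queues[MAX_QUEUE_NUM] */
    int num_queues;
} NicState;

/* Allocate the device state and its queue array in a single block. */
static NicState *nic_new(size_t state_size, int num_queues)
{
    NicState *nic = calloc(1, state_size + sizeof(QueueState) * num_queues);

    nic->queues = (QueueState *)((char *)nic + state_size);
    nic->num_queues = num_queues;
    for (int i = 0; i < num_queues; i++) {
        nic->queues[i].queue_index = i;
        nic->queues[i].owner_size = state_size;
    }
    return nic;
}

/* Recover the owning NicState from any queue entry, as qemu_get_nic() does. */
static NicState *nic_from_queue(QueueState *q)
{
    QueueState *q0 = q - q->queue_index;              /* back to queue 0 */
    return (NicState *)((char *)q0 - q->owner_size);  /* back past the state */
}

int main(void)
{
    NicState *nic = nic_new(sizeof(NicState), 4);

    printf("recovered owner: %s\n",
           nic_from_queue(&nic->queues[3]) == nic ? "ok" : "broken");
    free(nic);   /* one free() releases state and queues together */
    return 0;
}

Running the sketch prints "recovered owner: ok"; because everything lives in
one block, a single free() releases both the state and the queue array, which
is the same property the new g_free(nic) in qemu_del_nic() relies on.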

Patch

diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 573c669..bb2c26c 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -44,7 +44,7 @@  typedef struct VirtIONet
     VirtIODevice vdev;
     uint8_t mac[ETH_ALEN];
     uint16_t status;
-    VirtIONetQueue vqs[MAX_QUEUE_NUM];
+    VirtIONetQueue *vqs;
     VirtQueue *ctrl_vq;
     NICState *nic;
     uint32_t tx_timeout;
@@ -1326,8 +1326,9 @@  VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
     n->vdev.set_status = virtio_net_set_status;
     n->vdev.guest_notifier_mask = virtio_net_guest_notifier_mask;
     n->vdev.guest_notifier_pending = virtio_net_guest_notifier_pending;
+    n->max_queues = MAX(conf->queues, 1);
+    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
     n->vqs[0].rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
-    n->max_queues = conf->queues;
     n->curr_queues = 1;
     n->vqs[0].n = n;
     n->tx_timeout = net->txtimer;
@@ -1412,6 +1413,7 @@  void virtio_net_exit(VirtIODevice *vdev)
         }
     }
 
+    g_free(n->vqs);
     qemu_del_nic(n->nic);
     virtio_cleanup(&n->vdev);
 }
diff --git a/include/net/net.h b/include/net/net.h
index 43a045e..cb049a1 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -72,7 +72,7 @@  struct NetClientState {
 };
 
 typedef struct NICState {
-    NetClientState ncs[MAX_QUEUE_NUM];
+    NetClientState *ncs;
     NICConf *conf;
     void *opaque;
     bool peer_deleted;
diff --git a/net/net.c b/net/net.c
index be03a8d..6262ed0 100644
--- a/net/net.c
+++ b/net/net.c
@@ -235,23 +235,20 @@  NICState *qemu_new_nic(NetClientInfo *info,
                        const char *name,
                        void *opaque)
 {
-    NetClientState *nc;
     NetClientState **peers = conf->peers.ncs;
     NICState *nic;
-    int i;
+    int i, queues = MAX(1, conf->queues);
 
     assert(info->type == NET_CLIENT_OPTIONS_KIND_NIC);
     assert(info->size >= sizeof(NICState));
 
-    nc = qemu_new_net_client(info, peers[0], model, name);
-    nc->queue_index = 0;
-
-    nic = qemu_get_nic(nc);
+    nic = g_malloc0(info->size + sizeof(NetClientState) * queues);
+    nic->ncs = (void *)nic + info->size;
     nic->conf = conf;
     nic->opaque = opaque;
 
-    for (i = 1; i < conf->queues; i++) {
-        qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, nc->name,
+    for (i = 0; i < queues; i++) {
+        qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
                               NULL);
         nic->ncs[i].queue_index = i;
     }
@@ -261,7 +258,7 @@  NICState *qemu_new_nic(NetClientInfo *info,
 
 NetClientState *qemu_get_subqueue(NICState *nic, int queue_index)
 {
-    return &nic->ncs[queue_index];
+    return nic->ncs + queue_index;
 }
 
 NetClientState *qemu_get_queue(NICState *nic)
@@ -273,7 +270,7 @@  NICState *qemu_get_nic(NetClientState *nc)
 {
     NetClientState *nc0 = nc - nc->queue_index;
 
-    return DO_UPCAST(NICState, ncs[0], nc0);
+    return (NICState *)((void *)nc0 - nc->info->size);
 }
 
 void *qemu_get_nic_opaque(NetClientState *nc)
@@ -368,6 +365,8 @@  void qemu_del_nic(NICState *nic)
         qemu_cleanup_net_client(nc);
         qemu_free_net_client(nc);
     }
+
+    g_free(nic);
 }
 
 void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)