
[V4,14/19] virtio: introduce vector to virtqueues mapping

Message ID 1426671309-13645-15-git-send-email-jasowang@redhat.com
State New

Commit Message

Jason Wang March 18, 2015, 9:35 a.m. UTC
Currently we traverse all virtqueues to find the subset that uses a
specific vector. This is suboptimal when we support hundreds or even
thousands of virtqueues. So this patch introduces a method that
transports can use to get all virtqueues using the same vector. This is
done through QLISTs, and the number of QLISTs is queried through a
transport-specific method. When the guest sets a vector, the virtqueue
is linked into the corresponding list; helpers for traversing the list
are also introduced.

The first user will be virtio-pci, which will use this to speed up
MSI-X mask and unmask handling.
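
As an illustration of how a transport is expected to consume the new
helpers (the function name transport_handle_vector() below is only an
example, not part of this patch), iterating over all virtqueues bound
to one vector looks roughly like:

    #include "hw/virtio/virtio.h"

    /* Walk every virtqueue currently assigned to @vector. */
    static void transport_handle_vector(VirtIODevice *vdev, uint16_t vector)
    {
        VirtQueue *vq;

        for (vq = virtio_vector_first_queue(vdev, vector); vq;
             vq = virtio_vector_next_queue(vq)) {
            /* e.g. mask or unmask the guest notifier of this queue */
        }
    }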

Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/virtio/virtio-pci.c         |  8 ++++++++
 hw/virtio/virtio.c             | 32 ++++++++++++++++++++++++++++++--
 include/hw/virtio/virtio-bus.h |  1 +
 include/hw/virtio/virtio.h     |  3 +++
 4 files changed, 42 insertions(+), 2 deletions(-)

Comments

Cornelia Huck March 20, 2015, 11:39 a.m. UTC | #1
On Wed, 18 Mar 2015 17:35:04 +0800
Jason Wang <jasowang@redhat.com> wrote:

> Currently we traverse all virtqueues to find the subset that uses a
> specific vector. This is suboptimal when we support hundreds or even
> thousands of virtqueues. So this patch introduces a method that
> transports can use to get all virtqueues using the same vector. This is
> done through QLISTs, and the number of QLISTs is queried through a
> transport-specific method. When the guest sets a vector, the virtqueue
> is linked into the corresponding list; helpers for traversing the list
> are also introduced.
> 
> The first user will be virtio-pci, which will use this to speed up
> MSI-X mask and unmask handling.
> 
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>  hw/virtio/virtio-pci.c         |  8 ++++++++
>  hw/virtio/virtio.c             | 32 ++++++++++++++++++++++++++++++--
>  include/hw/virtio/virtio-bus.h |  1 +
>  include/hw/virtio/virtio.h     |  3 +++
>  4 files changed, 42 insertions(+), 2 deletions(-)

I'm still not too happy about introducing this overhead for all devices
when only pci will make use of it.

Could we perhaps make the queue handling dependent on whether the
transport actually provides the query_nvectors callback?
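
Just as a rough, untested sketch of what I mean for virtio_init()
(reusing the names from this patch):

    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* Only set up the per-vector lists if the transport can tell us
     * how many vectors it supports. */
    if (k->query_nvectors) {
        int nvectors = k->query_nvectors(qbus->parent);

        vdev->vector_queues = g_malloc0(sizeof(*vdev->vector_queues) *
                                        nvectors);
    }

virtio_queue_set_vector() and the traversal helpers would then need to
check vdev->vector_queues before touching the lists.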
Jason Wang March 31, 2015, 2:37 a.m. UTC | #2
On Fri, Mar 20, 2015 at 7:39 PM, Cornelia Huck 
<cornelia.huck@de.ibm.com> wrote:
> On Wed, 18 Mar 2015 17:35:04 +0800
> Jason Wang <jasowang@redhat.com> wrote:
> 
>>  Currently we traverse all virtqueues to find the subset that uses a
>>  specific vector. This is suboptimal when we support hundreds or even
>>  thousands of virtqueues. So this patch introduces a method that
>>  transports can use to get all virtqueues using the same vector. This
>>  is done through QLISTs, and the number of QLISTs is queried through
>>  a transport-specific method. When the guest sets a vector, the
>>  virtqueue is linked into the corresponding list; helpers for
>>  traversing the list are also introduced.
>>  
>>  The first user will be virtio-pci, which will use this to speed up
>>  MSI-X mask and unmask handling.
>>  
>>  Cc: Michael S. Tsirkin <mst@redhat.com>
>>  Signed-off-by: Jason Wang <jasowang@redhat.com>
>>  ---
>>   hw/virtio/virtio-pci.c         |  8 ++++++++
>>   hw/virtio/virtio.c             | 32 ++++++++++++++++++++++++++++++--
>>   include/hw/virtio/virtio-bus.h |  1 +
>>   include/hw/virtio/virtio.h     |  3 +++
>>   4 files changed, 42 insertions(+), 2 deletions(-)
> 
> I'm still not too happy about introducing this overhead for all devices
> when only pci will make use of it.
> 
> Could we perhaps make the queue handling dependent on whether the
> transport actually provides the query_nvectors callback?

Though I don't think it will introduce noticeable overhead, it looks
like we can do as you suggest. Will do it in the next version.
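
For reference, the list handling in virtio_queue_set_vector() would then
be skipped for transports without query_nvectors, roughly like this
(only a sketch, the final form may differ):

    void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
    {
        VirtQueue *vq = &vdev->vq[n];

        if (n >= virtio_get_queue_max(vdev)) {
            return;
        }

        /* Only maintain the lists when the transport tracks vectors. */
        if (vdev->vector_queues && vq->vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vq->vector = vector;
        if (vdev->vector_queues && vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }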

Thanks

Patch

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index e556919..c38f33f 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -910,6 +910,13 @@  static const TypeInfo virtio_9p_pci_info = {
  * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
  */
 
+static int virtio_pci_query_nvectors(DeviceState *d)
+{
+    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+
+    return proxy->nvectors;
+}
+
 /* This is called by virtio-bus just after the device is plugged. */
 static void virtio_pci_device_plugged(DeviceState *d)
 {
@@ -1500,6 +1507,7 @@  static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
     k->vmstate_change = virtio_pci_vmstate_change;
     k->device_plugged = virtio_pci_device_plugged;
     k->device_unplugged = virtio_pci_device_unplugged;
+    k->query_nvectors = virtio_pci_query_nvectors;
     k->queue_max = VIRTIO_PCI_QUEUE_MAX;
 }
 
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 023eb04..ca157e8 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -89,6 +89,7 @@  struct VirtQueue
     VirtIODevice *vdev;
     EventNotifier guest_notifier;
     EventNotifier host_notifier;
+    QLIST_ENTRY(VirtQueue) node;
 };
 
 /* virt queue functions */
@@ -613,7 +614,7 @@  void virtio_reset(void *opaque)
         vdev->vq[i].vring.used = 0;
         vdev->vq[i].last_avail_idx = 0;
         vdev->vq[i].pa = 0;
-        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
+        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
         vdev->vq[i].signalled_used = 0;
         vdev->vq[i].signalled_used_valid = false;
         vdev->vq[i].notification = true;
@@ -738,6 +739,16 @@  void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
     virtqueue_init(&vdev->vq[n]);
 }
 
+VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
+{
+    return QLIST_FIRST(&vdev->vector_queues[vector]);
+}
+
+VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
+{
+    return QLIST_NEXT(vq, node);
+}
+
 int virtio_queue_get_num(VirtIODevice *vdev, int n)
 {
     return vdev->vq[n].vring.num;
@@ -788,8 +799,17 @@  uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
 
 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
 {
-    if (n < virtio_get_queue_max(vdev))
+    VirtQueue *vq = &vdev->vq[n];
+
+    if (n < virtio_get_queue_max(vdev)) {
+        if (vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
+            QLIST_REMOVE(vq, node);
+        }
         vdev->vq[n].vector = vector;
+        if (vector != VIRTIO_NO_VECTOR) {
+            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
+        }
+    }
 }
 
 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
@@ -1097,6 +1117,7 @@  void virtio_cleanup(VirtIODevice *vdev)
     qemu_del_vm_change_state_handler(vdev->vmstate);
     g_free(vdev->config);
     g_free(vdev->vq);
+    g_free(vdev->vector_queues);
 }
 
 static void virtio_vmstate_change(void *opaque, int running, RunState state)
@@ -1134,7 +1155,14 @@  void virtio_instance_init_common(Object *proxy_obj, void *data,
 void virtio_init(VirtIODevice *vdev, const char *name,
                  uint16_t device_id, size_t config_size)
 {
+    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     int i, queue_max = virtio_get_queue_max(vdev);
+    int nvectors = k->query_nvectors ?
+        k->query_nvectors(qbus->parent) : queue_max;
+
+    vdev->vector_queues = g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
+
     vdev->device_id = device_id;
     vdev->status = 0;
     vdev->isr = 0;
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 4da8022..12386d5 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -62,6 +62,7 @@  typedef struct VirtioBusClass {
      * This is called by virtio-bus just before the device is unplugged.
      */
     void (*device_unplugged)(DeviceState *d);
+    int (*query_nvectors)(DeviceState *d);
     /*
      * Does the transport have variable vring alignment?
      * (ie can it ever call virtio_queue_set_align()?)
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 7ff40ac..e3adb1d 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -82,6 +82,7 @@  struct VirtIODevice
     VMChangeStateEntry *vmstate;
     char *bus_name;
     uint8_t device_endian;
+    QLIST_HEAD(, VirtQueue) *vector_queues;
 };
 
 typedef struct VirtioDeviceClass {
@@ -217,6 +218,8 @@  void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool set_handler);
 void virtio_queue_notify_vq(VirtQueue *vq);
 void virtio_irq(VirtQueue *vq);
+VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
+VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
 
 static inline void virtio_add_feature(uint32_t *features, unsigned int fbit)
 {