
[RFC,v9,18/27] virtio-blk: Call ioctl() directly instead of irqfd

Message ID 1342624074-24650-19-git-send-email-stefanha@linux.vnet.ibm.com
State New

Commit Message

Stefan Hajnoczi July 18, 2012, 3:07 p.m. UTC
Optimize for the MSI-X enabled and vector unmasked case where it is
possible to issue the KVM ioctl() directly instead of using irqfd.

This patch introduces a new virtio binding function which tries to
notify the guest in a thread-safe way.  If this is not possible, the
function returns false and virtio-blk falls back to irqfd.
---
 hw/msix.c       |   17 +++++++++++++++++
 hw/msix.h       |    1 +
 hw/virtio-blk.c |   10 ++++++++--
 hw/virtio-pci.c |    8 ++++++++
 hw/virtio.c     |    9 +++++++++
 hw/virtio.h     |    3 +++
 6 files changed, 46 insertions(+), 2 deletions(-)
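
In outline, the new fast path threads through several layers.  Condensed
from the hunks in this patch (a reading aid, not extra code), the call
chain is:

    virtio_blk_notify_guest()                        hw/virtio-blk.c
      -> virtio_queue_try_notify_from_thread(vq)     hw/virtio.c
        -> binding->try_notify_from_thread()         new VirtIOBindings hook
          -> virtio_pci_try_notify_from_thread()     hw/virtio-pci.c
            -> msix_try_notify_from_thread()         hw/msix.c
              -> kvm_set_irq(gsi, 1, NULL)           direct KVM ioctl

A false return anywhere along the chain (no hook, MSI-X disabled, vector
unused or masked, no in-kernel irqchip) makes virtio_blk_notify_guest()
fall back to event_notifier_set() on the guest notifier, i.e. the irqfd
path.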

Comments

Stefan Hajnoczi July 19, 2012, 9:11 a.m. UTC | #1
On Wed, Jul 18, 2012 at 4:40 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Wed, Jul 18, 2012 at 04:07:45PM +0100, Stefan Hajnoczi wrote:
>> Optimize for the MSI-X enabled and vector unmasked case where it is
>> possible to issue the KVM ioctl() directly instead of using irqfd.
>
> Why? Is an ioctl faster?

I have no benchmark results comparing irqfd against direct ioctl.  It
would be interesting to know if this "optimization" is worthwhile and
how much of a win it is.

The reasoning is that the irqfd code path signals an eventfd and then
kvm.ko's poll handler injects the interrupt.  The ioctl calls straight
into kvm.ko and skips the signalling/poll step.
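
For illustration only (a toy standalone program, not QEMU code): the
"irqfd style" below signals an eventfd that some other context has to
notice and service before the interrupt goes in (in the real case that
consumer is kvm.ko's irqfd wakeup handler), whereas the "ioctl style"
makes the call directly from the same thread:

#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

static void inject_interrupt(void)
{
    puts("interrupt injected");  /* stands in for kvm.ko raising the MSI */
}

int main(void)
{
    int efd = eventfd(0, 0);
    uint64_t val = 1;

    if (efd < 0) {
        return 1;
    }

    /* irqfd style: signal the eventfd ... */
    if (write(efd, &val, sizeof(val)) < 0) {
        return 1;
    }
    /* ... then a separate consumer (here a read(), in the real case
     * kvm.ko's poll/wakeup handler) services it and injects. */
    if (read(efd, &val, sizeof(val)) < 0) {
        return 1;
    }
    inject_interrupt();

    /* ioctl style: skip the eventfd round trip and call straight in. */
    inject_interrupt();

    close(efd);
    return 0;
}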

Stefan
Michael S. Tsirkin July 19, 2012, 9:19 a.m. UTC | #2
On Thu, Jul 19, 2012 at 10:11:49AM +0100, Stefan Hajnoczi wrote:
> On Wed, Jul 18, 2012 at 4:40 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > On Wed, Jul 18, 2012 at 04:07:45PM +0100, Stefan Hajnoczi wrote:
> >> Optimize for the MSI-X enabled and vector unmasked case where it is
> >> possible to issue the KVM ioctl() directly instead of using irqfd.
> >
> > Why? Is an ioctl faster?
> 
> I have no benchmark results comparing irqfd against direct ioctl.  It
> would be interesting to know if this "optimization" is worthwhile and
> how much of a win it is.
> 
> The reasoning is that the irqfd code path signals an eventfd and then
> kvm.ko's poll handler injects the interrupt.  The ioctl calls straight
> into kvm.ko and skips the signalling/poll step.
> 
> Stefan

Polling is done in the kernel, so at least for MSI it's just a function
call.  In fact, at the moment irqfd is more optimized.  Maybe a direct
ioctl is faster for level-triggered IRQs, but do we really care?

Patch

diff --git a/hw/msix.c b/hw/msix.c
index 7955221..3308604 100644
--- a/hw/msix.c
+++ b/hw/msix.c
@@ -503,6 +503,23 @@  void msix_notify(PCIDevice *dev, unsigned vector)
     stl_le_phys(address, data);
 }
 
+bool msix_try_notify_from_thread(PCIDevice *dev, unsigned vector)
+{
+    if (unlikely(vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector])) {
+        return false;
+    }
+    if (unlikely(msix_is_masked(dev, vector))) {
+        return false;
+    }
+#ifdef KVM_CAP_IRQCHIP
+    if (likely(kvm_enabled() && kvm_irqchip_in_kernel())) {
+        kvm_set_irq(dev->msix_irq_entries[vector].gsi, 1, NULL);
+        return true;
+    }
+#endif
+    return false;
+}
+
 void msix_reset(PCIDevice *dev)
 {
     if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
diff --git a/hw/msix.h b/hw/msix.h
index a8661e1..99fb08f 100644
--- a/hw/msix.h
+++ b/hw/msix.h
@@ -26,6 +26,7 @@  void msix_vector_unuse(PCIDevice *dev, unsigned vector);
 void msix_unuse_all_vectors(PCIDevice *dev);
 
 void msix_notify(PCIDevice *dev, unsigned vector);
+bool msix_try_notify_from_thread(PCIDevice *dev, unsigned vector);
 
 void msix_reset(PCIDevice *dev);
 
diff --git a/hw/virtio-blk.c b/hw/virtio-blk.c
index bdff68a..efeffa0 100644
--- a/hw/virtio-blk.c
+++ b/hw/virtio-blk.c
@@ -82,6 +82,12 @@  static void virtio_blk_notify_guest(VirtIOBlock *s)
         !(s->vdev.guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY))))
 		return;
 
+    /* Try to issue the ioctl() directly for speed */
+    if (likely(virtio_queue_try_notify_from_thread(s->vq))) {
+        return;
+    }
+
+    /* If the fast path didn't work, use irqfd */
     event_notifier_set(virtio_queue_get_guest_notifier(s->vq));
 }
 
@@ -263,7 +269,7 @@  static void data_plane_start(VirtIOBlock *s)
     vring_setup(&s->vring, &s->vdev, 0);
 
     /* Set up guest notifier (irq) */
-    if (s->vdev.binding->set_guest_notifier(s->vdev.binding_opaque, 0, true) != 0) {
+    if (s->vdev.binding->set_guest_notifiers(s->vdev.binding_opaque, true) != 0) {
         fprintf(stderr, "virtio-blk failed to set guest notifier, ensure -enable-kvm is set\n");
         exit(1);
     }
@@ -315,7 +321,7 @@  static void data_plane_stop(VirtIOBlock *s)
     event_poll_cleanup(&s->event_poll);
 
     /* Clean up guest notifier (irq) */
-    s->vdev.binding->set_guest_notifier(s->vdev.binding_opaque, 0, false);
+    s->vdev.binding->set_guest_notifiers(s->vdev.binding_opaque, false);
 }
 
 static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t val)
diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index f1e13af..03512b3 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -106,6 +106,13 @@  static void virtio_pci_notify(void *opaque, uint16_t vector)
         qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
 }
 
+static bool virtio_pci_try_notify_from_thread(void *opaque, uint16_t vector)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    return msix_enabled(&proxy->pci_dev) &&
+           msix_try_notify_from_thread(&proxy->pci_dev, vector);
+}
+
 static void virtio_pci_save_config(void * opaque, QEMUFile *f)
 {
     VirtIOPCIProxy *proxy = opaque;
@@ -707,6 +714,7 @@  static void virtio_pci_vmstate_change(void *opaque, bool running)
 
 static const VirtIOBindings virtio_pci_bindings = {
     .notify = virtio_pci_notify,
+    .try_notify_from_thread = virtio_pci_try_notify_from_thread,
     .save_config = virtio_pci_save_config,
     .load_config = virtio_pci_load_config,
     .save_queue = virtio_pci_save_queue,
diff --git a/hw/virtio.c b/hw/virtio.c
index 064aecf..a1d1a8a 100644
--- a/hw/virtio.c
+++ b/hw/virtio.c
@@ -689,6 +689,15 @@  static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
 	return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
 }
 
+bool virtio_queue_try_notify_from_thread(VirtQueue *vq)
+{
+    VirtIODevice *vdev = vq->vdev;
+    if (likely(vdev->binding->try_notify_from_thread)) {
+        return vdev->binding->try_notify_from_thread(vdev->binding_opaque, vq->vector);
+    }
+    return false;
+}
+
 static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
     uint16_t old, new;
diff --git a/hw/virtio.h b/hw/virtio.h
index 400c092..2cdf2be 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -93,6 +93,7 @@  typedef struct VirtQueueElement
 
 typedef struct {
     void (*notify)(void * opaque, uint16_t vector);
+    bool (*try_notify_from_thread)(void * opaque, uint16_t vector);
     void (*save_config)(void * opaque, QEMUFile *f);
     void (*save_queue)(void * opaque, int n, QEMUFile *f);
     int (*load_config)(void * opaque, QEMUFile *f);
@@ -160,6 +161,8 @@  void virtio_cleanup(VirtIODevice *vdev);
 
 void virtio_notify_config(VirtIODevice *vdev);
 
+bool virtio_queue_try_notify_from_thread(VirtQueue *vq);
+
 void virtio_queue_set_notification(VirtQueue *vq, int enable);
 
 int virtio_queue_ready(VirtQueue *vq);