[2/3] virtio-pci: Use ioeventfd for virtqueue notify

Message ID 1289483242-6069-3-git-send-email-stefanha@linux.vnet.ibm.com
State New

Commit Message

Stefan Hajnoczi Nov. 11, 2010, 1:47 p.m. UTC
Virtqueue notify is currently handled synchronously in userspace virtio.  This
prevents the vcpu from executing guest code while hardware emulation code
handles the notify.

On systems that support KVM, the ioeventfd mechanism can be used to make
virtqueue notify a lightweight exit by deferring hardware emulation to the
iothread and allowing the VM to continue execution.  This model is similar to
how vhost receives virtqueue notifies.

The result of this change is improved performance for userspace virtio devices.
Virtio-blk throughput increases especially for multithreaded scenarios and
virtio-net transmit throughput increases substantially.

Some virtio devices are known to have guest drivers which expect a notify to be
processed synchronously and spin waiting for completion.  Only enable ioeventfd
for virtio-blk and virtio-net for now.

Care must be taken not to interfere with vhost-net, which already uses
ioeventfd host notifiers.  The following list shows the behavior implemented in
this patch and is designed to take vhost-net into account:

 * VIRTIO_CONFIG_S_DRIVER_OK -> assign host notifiers, qemu_set_fd_handler(virtio_pci_host_notifier_read)
 * reset -> qemu_set_fd_handler(NULL), deassign host notifiers
 * virtio_pci_set_host_notifier(true) -> qemu_set_fd_handler(NULL)
 * virtio_pci_set_host_notifier(false) -> qemu_set_fd_handler(virtio_pci_host_notifier_read)

Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
 hw/virtio-pci.c |  155 +++++++++++++++++++++++++++++++++++++++++++-----------
 hw/virtio.c     |    5 ++
 hw/virtio.h     |    1 +
 3 files changed, 129 insertions(+), 32 deletions(-)

Comments

Michael S. Tsirkin Nov. 11, 2010, 3:53 p.m. UTC | #1
On Thu, Nov 11, 2010 at 01:47:21PM +0000, Stefan Hajnoczi wrote:
> Virtqueue notify is currently handled synchronously in userspace virtio.  This
> prevents the vcpu from executing guest code while hardware emulation code
> handles the notify.
> 
> On systems that support KVM, the ioeventfd mechanism can be used to make
> virtqueue notify a lightweight exit by deferring hardware emulation to the
> iothread and allowing the VM to continue execution.  This model is similar to
> how vhost receives virtqueue notifies.
> 
> The result of this change is improved performance for userspace virtio devices.
> Virtio-blk throughput increases especially for multithreaded scenarios and
> virtio-net transmit throughput increases substantially.
> 
> Some virtio devices are known to have guest drivers which expect a notify to be
> processed synchronously and spin waiting for completion.  Only enable ioeventfd
> for virtio-blk and virtio-net for now.
> 
> Care must be taken not to interfere with vhost-net, which already uses
> ioeventfd host notifiers.  The following list shows the behavior implemented in
> this patch and is designed to take vhost-net into account:
> 
>  * VIRTIO_CONFIG_S_DRIVER_OK -> assign host notifiers, qemu_set_fd_handler(virtio_pci_host_notifier_read)

we should also deassign when VIRTIO_CONFIG_S_DRIVER_OK is cleared
by io write or bus master bit?

>  * reset -> qemu_set_fd_handler(NULL), deassign host notifiers
>  * virtio_pci_set_host_notifier(true) -> qemu_set_fd_handler(NULL)
>  * virtio_pci_set_host_notifier(false) -> qemu_set_fd_handler(virtio_pci_host_notifier_read)
> 
> Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
> ---
>  hw/virtio-pci.c |  155 +++++++++++++++++++++++++++++++++++++++++++-----------
>  hw/virtio.c     |    5 ++
>  hw/virtio.h     |    1 +
>  3 files changed, 129 insertions(+), 32 deletions(-)
> 
> diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
> index 549118d..436fc59 100644
> --- a/hw/virtio-pci.c
> +++ b/hw/virtio-pci.c
> @@ -83,6 +83,10 @@
>  /* Flags track per-device state like workarounds for quirks in older guests. */
>  #define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)
>  
> +/* Performance improves when virtqueue kick processing is decoupled from the
> + * vcpu thread using ioeventfd for some devices. */
> +#define VIRTIO_PCI_FLAG_USE_IOEVENTFD   (1 << 1)
> +
>  /* QEMU doesn't strictly need write barriers since everything runs in
>   * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
>   * KVM or if kqemu gets SMP support.
> @@ -179,12 +183,108 @@ static int virtio_pci_load_queue(void * opaque, int n, QEMUFile *f)
>      return 0;
>  }
>  
> +static int virtio_pci_set_host_notifier_ioeventfd(VirtIOPCIProxy *proxy, int n, bool assign)
> +{
> +    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
> +    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
> +    int r;
> +    if (assign) {
> +        r = event_notifier_init(notifier, 1);
> +        if (r < 0) {
> +            return r;
> +        }
> +        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
> +                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
> +                                       n, assign);
> +        if (r < 0) {
> +            event_notifier_cleanup(notifier);
> +        }
> +    } else {
> +        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
> +                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
> +                                       n, assign);
> +        if (r < 0) {
> +            return r;
> +        }
> +        event_notifier_cleanup(notifier);
> +    }
> +    return r;
> +}
> +
> +static void virtio_pci_host_notifier_read(void *opaque)
> +{
> +    VirtQueue *vq = opaque;
> +    EventNotifier *n = virtio_queue_get_host_notifier(vq);
> +    if (event_notifier_test_and_clear(n)) {
> +        virtio_queue_notify_vq(vq);
> +    }
> +}
> +
> +static void virtio_pci_set_host_notifier_fd_handler(VirtIOPCIProxy *proxy, int n, bool assign)
> +{
> +    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
> +    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
> +    if (assign) {
> +        qemu_set_fd_handler(event_notifier_get_fd(notifier),
> +                            virtio_pci_host_notifier_read, NULL, vq);
> +    } else {
> +        qemu_set_fd_handler(event_notifier_get_fd(notifier),
> +                            NULL, NULL, NULL);
> +    }
> +}
> +
> +static int virtio_pci_set_host_notifiers(VirtIOPCIProxy *proxy, bool assign)
> +{
> +    int n, r;
> +
> +    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
> +        if (!virtio_queue_get_num(proxy->vdev, n)) {
> +            continue;
> +        }
> +
> +        if (assign) {
> +            r = virtio_pci_set_host_notifier_ioeventfd(proxy, n, true);
> +            if (r < 0) {
> +                goto assign_error;
> +            }
> +
> +            virtio_pci_set_host_notifier_fd_handler(proxy, n, true);
> +        } else {
> +            virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
> +            virtio_pci_set_host_notifier_ioeventfd(proxy, n, false);
> +        }
> +    }
> +    return 0;
> +
> +assign_error:
> +    proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
> +    while (--n >= 0) {
> +        virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
> +        virtio_pci_set_host_notifier_ioeventfd(proxy, n, false);
> +    }
> +    return r;
> +}
> +
> +static void virtio_pci_reset_vdev(VirtIOPCIProxy *proxy)
> +{
> +    /* Poke virtio device so it deassigns its host notifiers (if any) */
> +    virtio_set_status(proxy->vdev, 0);

Hmm. virtio_reset already sets status to 0.
I guess it should just be fixed to call virtio_set_status?

> +
> +    /* Now safely deassign our own host notifiers */
> +    if (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) {
> +        virtio_pci_set_host_notifiers(proxy, false);
> +    }
> +
> +    virtio_reset(proxy->vdev);
> +    msix_unuse_all_vectors(&proxy->pci_dev);
> +}
> +
>  static void virtio_pci_reset(DeviceState *d)
>  {
>      VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev);
> -    virtio_reset(proxy->vdev);
> +    virtio_pci_reset_vdev(proxy);
>      msix_reset(&proxy->pci_dev);
> -    proxy->flags = 0;
> +    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
>  }
>  
>  static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
> @@ -209,11 +309,10 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
>      case VIRTIO_PCI_QUEUE_PFN:
>          pa = (target_phys_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
>          if (pa == 0) {
> -            virtio_reset(proxy->vdev);
> -            msix_unuse_all_vectors(&proxy->pci_dev);
> -        }
> -        else
> +            virtio_pci_reset_vdev(proxy);
> +        } else {
>              virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
> +        }
>          break;
>      case VIRTIO_PCI_QUEUE_SEL:
>          if (val < VIRTIO_PCI_QUEUE_MAX)
> @@ -223,10 +322,16 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
>          virtio_queue_notify(vdev, val);
>          break;
>      case VIRTIO_PCI_STATUS:
> -        virtio_set_status(vdev, val & 0xFF);
> -        if (vdev->status == 0) {
> -            virtio_reset(proxy->vdev);
> -            msix_unuse_all_vectors(&proxy->pci_dev);
> +        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
> +            !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
> +            (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD)) {
> +            virtio_pci_set_host_notifiers(proxy, true);
> +        }

So we set host notifiers to true from here, but to false
only on reset? This seems strange. Should not we disable
notifiers when driver clears OK status?
How about on bus master disable?

> +
> +        if (val & 0xFF) {
> +            virtio_set_status(vdev, val & 0xFF);
> +        } else {
> +            virtio_pci_reset_vdev(proxy);
>          }
>  
>          /* Linux before 2.6.34 sets the device as OK without enabling
> @@ -480,30 +585,12 @@ assign_error:
>  static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
>  {
>      VirtIOPCIProxy *proxy = opaque;
> -    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
> -    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
> -    int r;
> -    if (assign) {
> -        r = event_notifier_init(notifier, 1);
> -        if (r < 0) {
> -            return r;
> -        }
> -        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
> -                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
> -                                       n, assign);
> -        if (r < 0) {
> -            event_notifier_cleanup(notifier);
> -        }
> +    if (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) {
> +        virtio_pci_set_host_notifier_fd_handler(proxy, n, !assign);
> +        return 0;
>      } else {
> -        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
> -                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
> -                                       n, assign);
> -        if (r < 0) {
> -            return r;
> -        }
> -        event_notifier_cleanup(notifier);
> +        return virtio_pci_set_host_notifier_ioeventfd(proxy, n, assign);
>      }
> -    return r;
>  }
>  
>  static const VirtIOBindings virtio_pci_bindings = {
> @@ -702,6 +789,8 @@ static PCIDeviceInfo virtio_info[] = {
>          .qdev.props = (Property[]) {
>              DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
>              DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, block),
> +            DEFINE_PROP_UINT32("flags", VirtIOPCIProxy, flags,
> +                               VIRTIO_PCI_FLAG_USE_IOEVENTFD),
>              DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
>              DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
>              DEFINE_PROP_END_OF_LIST(),
> @@ -714,6 +803,8 @@ static PCIDeviceInfo virtio_info[] = {
>          .exit       = virtio_net_exit_pci,
>          .romfile    = "pxe-virtio.bin",
>          .qdev.props = (Property[]) {
> +            DEFINE_PROP_UINT32("flags", VirtIOPCIProxy, flags,
> +                               VIRTIO_PCI_FLAG_USE_IOEVENTFD),
>              DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
>              DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
>              DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),

This ties interface to an internal macro value.  Further, user gets to
tweak other fields in this integer which we don't want.  Finally, the
interface is extremely unfriendly.
Please use a bit property instead: DEFINE_PROP_BIT.
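
For reference, the bit-property form would look roughly like this (a
sketch of the suggestion only; the property name, bit number and default
are placeholders, not necessarily what a later revision uses):

    /* Expose just the ioeventfd bit instead of the raw flags word. */
    #define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT  1
    #define VIRTIO_PCI_FLAG_USE_IOEVENTFD \
        (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)

    /* In the qdev property list, defaulting the bit to on: */
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),

That gives the user a plain ioeventfd=on|off switch without letting them
touch the other bits in proxy->flags.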

> diff --git a/hw/virtio.c b/hw/virtio.c
> index a2a657e..f588e29 100644
> --- a/hw/virtio.c
> +++ b/hw/virtio.c
> @@ -582,6 +582,11 @@ void virtio_queue_notify(VirtIODevice *vdev, int n)
>      }
>  }
>  
> +void virtio_queue_notify_vq(VirtQueue *vq)
> +{
> +    virtio_queue_notify(vq->vdev, vq - vq->vdev->vq);

Let's implement virtio_queue_notify in terms of virtio_queue_notify_vq.
Not the other way around.
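
Concretely, the suggested inversion would look something like this (a
sketch only; it assumes the current virtio_queue_notify() body guards on
vring.desc before calling the queue's handle_output callback):

    /* Make the VirtQueue variant the primitive... */
    void virtio_queue_notify_vq(VirtQueue *vq)
    {
        if (vq->vring.desc) {
            vq->handle_output(vq->vdev, vq);
        }
    }

    /* ...and turn the index-based helper into a thin wrapper. */
    void virtio_queue_notify(VirtIODevice *vdev, int n)
    {
        if (n < VIRTIO_PCI_QUEUE_MAX) {
            virtio_queue_notify_vq(&vdev->vq[n]);
        }
    }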

> +}
> +
>  uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
>  {
>      return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
> diff --git a/hw/virtio.h b/hw/virtio.h
> index 02fa312..5ae521c 100644
> --- a/hw/virtio.h
> +++ b/hw/virtio.h
> @@ -219,5 +219,6 @@ void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
>  VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
>  EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
>  EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
> +void virtio_queue_notify_vq(VirtQueue *vq);
>  void virtio_irq(VirtQueue *vq);
>  #endif
> -- 
> 1.7.2.3
Christoph Hellwig Nov. 11, 2010, 4:45 p.m. UTC | #2
On Thu, Nov 11, 2010 at 01:47:21PM +0000, Stefan Hajnoczi wrote:
> Some virtio devices are known to have guest drivers which expect a notify to be
> processed synchronously and spin waiting for completion.  Only enable ioeventfd
> for virtio-blk and virtio-net for now.

Who guarantees that less common virtio-blk and virtio-net guest drivers
for non-Linux OSes are fine with it?  Maybe you should add a feature flag
that the guest has to ACK to enable it.
Avi Kivity Nov. 11, 2010, 4:59 p.m. UTC | #3
On 11/11/2010 03:47 PM, Stefan Hajnoczi wrote:
> Some virtio devices are known to have guest drivers which expect a notify to be
> processed synchronously and spin waiting for completion.  Only enable ioeventfd
> for virtio-blk and virtio-net for now.

Which drivers are these?

I only know of the virtio-gl driver, which isn't upstream.
Michael S. Tsirkin Nov. 11, 2010, 5:12 p.m. UTC | #4
On Thu, Nov 11, 2010 at 06:59:29PM +0200, Avi Kivity wrote:
> On 11/11/2010 03:47 PM, Stefan Hajnoczi wrote:
> >Some virtio devices are known to have guest drivers which expect a notify to be
> >processed synchronously and spin waiting for completion.  Only enable ioeventfd
> >for virtio-blk and virtio-net for now.
> 
> Which drivers are these?
> 
> I only know of the virtio-gl driver, which isn't upstream.

I think that PXE virtio drivers do polling. But I think this change does
not break these drivers. It'll probably make them a bit slower.  Right,
Gleb?
Gleb Natapov Nov. 11, 2010, 5:55 p.m. UTC | #5
On Thu, Nov 11, 2010 at 07:12:40PM +0200, Michael S. Tsirkin wrote:
> On Thu, Nov 11, 2010 at 06:59:29PM +0200, Avi Kivity wrote:
> > On 11/11/2010 03:47 PM, Stefan Hajnoczi wrote:
> > >Some virtio devices are known to have guest drivers which expect a notify to be
> > >processed synchronously and spin waiting for completion.  Only enable ioeventfd
> > >for virtio-blk and virtio-net for now.
> > 
> > Which drivers are these?
> > 
> > I only know of the virtio-gl driver, which isn't upstream.
> 
> I think that PXE virtio drivers do polling. But I think this change does
> not break these drivers. It'll probably make them a bit slower.  Right,
> Gleb?
> 
Can't tell about PXE virtio drivers, but virtio-blk in seabios does
polling.

--
			Gleb.
Stefan Hajnoczi Nov. 12, 2010, 9:18 a.m. UTC | #6
On Thu, Nov 11, 2010 at 3:53 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Thu, Nov 11, 2010 at 01:47:21PM +0000, Stefan Hajnoczi wrote:
>> Care must be taken not to interfere with vhost-net, which already uses
>> ioeventfd host notifiers.  The following list shows the behavior implemented in
>> this patch and is designed to take vhost-net into account:
>>
>>  * VIRTIO_CONFIG_S_DRIVER_OK -> assign host notifiers, qemu_set_fd_handler(virtio_pci_host_notifier_read)
>
> we should also deassign when VIRTIO_CONFIG_S_DRIVER_OK is cleared
> by io write or bus master bit?

You're right, I'll fix the lifecycle to trigger symmetrically on
status bit changes rather than VIRTIO_CONFIG_S_DRIVER_OK/reset.
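
Roughly, the VIRTIO_PCI_STATUS case would then key off the DRIVER_OK
transition in both directions, something like (a hypothetical sketch of
the v3 shape, not the code in this patch):

    case VIRTIO_PCI_STATUS:
        if (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) {
            bool was_ok = vdev->status & VIRTIO_CONFIG_S_DRIVER_OK;
            bool now_ok = val & VIRTIO_CONFIG_S_DRIVER_OK;

            if (!was_ok && now_ok) {
                virtio_pci_set_host_notifiers(proxy, true);
            } else if (was_ok && !now_ok) {
                virtio_pci_set_host_notifiers(proxy, false);
            }
        }
        /* ... existing status/reset handling continues here ... */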

>> +static void virtio_pci_reset_vdev(VirtIOPCIProxy *proxy)
>> +{
>> +    /* Poke virtio device so it deassigns its host notifiers (if any) */
>> +    virtio_set_status(proxy->vdev, 0);
>
> Hmm. virtio_reset already sets status to 0.
> I guess it should just be fixed to call virtio_set_status?

This part is ugly.  The problem is that virtio_reset() calls
virtio_set_status(vdev, 0) but doesn't give the transport binding a
chance clean up after the virtio device has cleaned up.  Since
virtio-net will spot status=0 and deassign its host notifier, we need
to perform our own clean up after vhost.

What makes this slightly less of a hack is the fact that virtio-pci.c
was already causing virtio_set_status(vdev, 0) to be invoked twice
during reset.  When 0 is written to the VIRTIO_PCI_STATUS register, we
do virtio_set_status(proxy->vdev, val & 0xFF) and then
virtio_reset(proxy->vdev).  So the status byte callback already gets
invoked twice today.

I've just split this out into virtio_pci_reset_vdev() and (ab)used it
to correctly clean up virtqueue ioeventfd.

The alternative is to add another callback from virtio.c so we are
notified after the vdev's reset callback has finished.

>> @@ -223,10 +322,16 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
>>          virtio_queue_notify(vdev, val);
>>          break;
>>      case VIRTIO_PCI_STATUS:
>> -        virtio_set_status(vdev, val & 0xFF);
>> -        if (vdev->status == 0) {
>> -            virtio_reset(proxy->vdev);
>> -            msix_unuse_all_vectors(&proxy->pci_dev);
>> +        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
>> +            !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
>> +            (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD)) {
>> +            virtio_pci_set_host_notifiers(proxy, true);
>> +        }
>
> So we set host notifiers to true from here, but to false
> only on reset? This seems strange. Should not we disable
> notifiers when driver clears OK status?
> How about on bus master disable?

You're right, this needs to be fixed.

>> @@ -714,6 +803,8 @@ static PCIDeviceInfo virtio_info[] = {
>>          .exit       = virtio_net_exit_pci,
>>          .romfile    = "pxe-virtio.bin",
>>          .qdev.props = (Property[]) {
>> +            DEFINE_PROP_UINT32("flags", VirtIOPCIProxy, flags,
>> +                               VIRTIO_PCI_FLAG_USE_IOEVENTFD),
>>              DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
>>              DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
>>              DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
>
> This ties interface to an internal macro value.  Further, user gets to
> tweak other fields in this integer which we don't want.  Finally, the
> interface is extremely unfriendly.
> Please use a bit property instead: DEFINE_PROP_BIT.

Will fix in v3.

>> diff --git a/hw/virtio.c b/hw/virtio.c
>> index a2a657e..f588e29 100644
>> --- a/hw/virtio.c
>> +++ b/hw/virtio.c
>> @@ -582,6 +582,11 @@ void virtio_queue_notify(VirtIODevice *vdev, int n)
>>      }
>>  }
>>
>> +void virtio_queue_notify_vq(VirtQueue *vq)
>> +{
>> +    virtio_queue_notify(vq->vdev, vq - vq->vdev->vq);
>
> Let's implement virtio_queue_notify in terms of virtio_queue_notify_vq.
> Not the other way around.

Will fix in v3.

Stefan
Stefan Hajnoczi Nov. 12, 2010, 9:20 a.m. UTC | #7
On Thu, Nov 11, 2010 at 4:45 PM, Christoph Hellwig <hch@infradead.org> wrote:
> On Thu, Nov 11, 2010 at 01:47:21PM +0000, Stefan Hajnoczi wrote:
>> Some virtio devices are known to have guest drivers which expect a notify to be
>> processed synchronously and spin waiting for completion.  Only enable ioeventfd
>> for virtio-blk and virtio-net for now.
>
> Who guarantees that less common virtio-blk and virtio-net guest drivers
> for non-Linux OSes are fine with it?  Maybe you should add a feature flag
> that the guest has to ACK to enable it.

Virtio-blk and virtio-net are fine.  Both of those devices are
expected to operate asynchronously.  SeaBIOS and gPXE virtio-net
drivers spin but they expect to and it is okay in those environments.
They already burn CPU today.

Virtio-console expects synchronous virtqueue kick.  In Linux,
virtio_console.c __send_control_msg() and send_buf() will spin.  Qemu
userspace is able to complete those requests synchronously so that the
guest never actually burns CPU (e.g.
hw/virtio-serial-bus.c:send_control_msg()).  I don't want to burn CPU
in places where we previously didn't.
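
The guest-side pattern in question is roughly the following (paraphrased
from memory, not the literal virtio_console.c code):

    /* Queue a control message, kick, then busy-wait for the host to
     * consume it.  With a synchronous kick the buffer is already used
     * by the time the kick returns, so the loop never actually spins. */
    virtqueue_add_buf(vq, sg, 1, 0, &cpkt);
    virtqueue_kick(vq);
    while (!virtqueue_get_buf(vq, &len))
        cpu_relax();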

It's good that QEMU can decide whether or not to handle virtqueue kick
in the vcpu thread.  For high performance asynchronous devices like
virtio-net and virtio-blk it makes sense to use ioeventfd.  For others
it may not be useful.  I'm not sure a feature bit that exposes this
detail to the guest would be useful.

Stefan
Michael S. Tsirkin Nov. 12, 2010, 9:25 a.m. UTC | #8
On Fri, Nov 12, 2010 at 09:18:48AM +0000, Stefan Hajnoczi wrote:
> On Thu, Nov 11, 2010 at 3:53 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > On Thu, Nov 11, 2010 at 01:47:21PM +0000, Stefan Hajnoczi wrote:
> >> Care must be taken not to interfere with vhost-net, which already uses
> >> ioeventfd host notifiers.  The following list shows the behavior implemented in
> >> this patch and is designed to take vhost-net into account:
> >>
> >>  * VIRTIO_CONFIG_S_DRIVER_OK -> assign host notifiers, qemu_set_fd_handler(virtio_pci_host_notifier_read)
> >
> > we should also deassign when VIRTIO_CONFIG_S_DRIVER_OK is cleared
> > by io write or bus master bit?
> 
> You're right, I'll fix the lifecycle to trigger symmetrically on
> status bit changes rather than VIRTIO_CONFIG_S_DRIVER_OK/reset.
> 
> >> +static void virtio_pci_reset_vdev(VirtIOPCIProxy *proxy)
> >> +{
> >> +    /* Poke virtio device so it deassigns its host notifiers (if any) */
> >> +    virtio_set_status(proxy->vdev, 0);
> >
> > Hmm. virtio_reset already sets status to 0.
> > I guess it should just be fixed to call virtio_set_status?
> 
> This part is ugly.  The problem is that virtio_reset() calls
> virtio_set_status(vdev, 0) but doesn't give the transport binding a
> chance to clean up after the virtio device has cleaned up.  Since
> virtio-net will spot status=0 and deassign its host notifier, we need
> to perform our own clean up after vhost.
> 
> What makes this slightly less of a hack is the fact that virtio-pci.c
> was already causing virtio_set_status(vdev, 0) to be invoked twice
> during reset.  When 0 is written to the VIRTIO_PCI_STATUS register, we
> do virtio_set_status(proxy->vdev, val & 0xFF) and then
> virtio_reset(proxy->vdev).  So the status byte callback already gets
> invoked twice today.
> 
> I've just split this out into virtio_pci_reset_vdev() and (ab)used it
> to correctly clean up virtqueue ioeventfd.
> 
> The alternative is to add another callback from virtio.c so we are
> notified after the vdev's reset callback has finished.

Oh, likely not worth it. Maybe put the above explanation in the comment.
Will this go away now that you move to set notifiers on status write?

> >> @@ -223,10 +322,16 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
> >>          virtio_queue_notify(vdev, val);
> >>          break;
> >>      case VIRTIO_PCI_STATUS:
> >> -        virtio_set_status(vdev, val & 0xFF);
> >> -        if (vdev->status == 0) {
> >> -            virtio_reset(proxy->vdev);
> >> -            msix_unuse_all_vectors(&proxy->pci_dev);
> >> +        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
> >> +            !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
> >> +            (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD)) {
> >> +            virtio_pci_set_host_notifiers(proxy, true);
> >> +        }
> >
> > So we set host notifiers to true from here, but to false
> > only on reset? This seems strange. Should not we disable
> > notifiers when driver clears OK status?
> > How about on bus master disable?
> 
> You're right, this needs to be fixed.
> 
> >> @@ -714,6 +803,8 @@ static PCIDeviceInfo virtio_info[] = {
> >>          .exit       = virtio_net_exit_pci,
> >>          .romfile    = "pxe-virtio.bin",
> >>          .qdev.props = (Property[]) {
> >> +            DEFINE_PROP_UINT32("flags", VirtIOPCIProxy, flags,
> >> +                               VIRTIO_PCI_FLAG_USE_IOEVENTFD),
> >>              DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
> >>              DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
> >>              DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
> >
> > This ties interface to an internal macro value.  Further, user gets to
> > tweak other fields in this integer which we don't want.  Finally, the
> > interface is extremely unfriendly.
> > Please use a bit property instead: DEFINE_PROP_BIT.
> 
> Will fix in v3.
> 
> >> diff --git a/hw/virtio.c b/hw/virtio.c
> >> index a2a657e..f588e29 100644
> >> --- a/hw/virtio.c
> >> +++ b/hw/virtio.c
> >> @@ -582,6 +582,11 @@ void virtio_queue_notify(VirtIODevice *vdev, int n)
> >>      }
> >>  }
> >>
> >> +void virtio_queue_notify_vq(VirtQueue *vq)
> >> +{
> >> +    virtio_queue_notify(vq->vdev, vq - vq->vdev->vq);
> >
> > Let's implement virtio_queue_notify in terms of virtio_queue_notify_vq.
> > Not the other way around.
> 
> Will fix in v3.
> 
> Stefan
Stefan Hajnoczi Nov. 12, 2010, 1:10 p.m. UTC | #9
On Fri, Nov 12, 2010 at 9:25 AM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Fri, Nov 12, 2010 at 09:18:48AM +0000, Stefan Hajnoczi wrote:
>> On Thu, Nov 11, 2010 at 3:53 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> > On Thu, Nov 11, 2010 at 01:47:21PM +0000, Stefan Hajnoczi wrote:
>> >> Care must be taken not to interfere with vhost-net, which already uses
>> >> ioeventfd host notifiers.  The following list shows the behavior implemented in
>> >> this patch and is designed to take vhost-net into account:
>> >>
>> >>  * VIRTIO_CONFIG_S_DRIVER_OK -> assign host notifiers, qemu_set_fd_handler(virtio_pci_host_notifier_read)
>> >
>> > we should also deassign when VIRTIO_CONFIG_S_DRIVER_OK is cleared
>> > by io write or bus master bit?
>>
>> You're right, I'll fix the lifecycle to trigger symmetrically on
>> status bit changes rather than VIRTIO_CONFIG_S_DRIVER_OK/reset.
>>
>> >> +static void virtio_pci_reset_vdev(VirtIOPCIProxy *proxy)
>> >> +{
>> >> +    /* Poke virtio device so it deassigns its host notifiers (if any) */
>> >> +    virtio_set_status(proxy->vdev, 0);
>> >
>> > Hmm. virtio_reset already sets status to 0.
>> > I guess it should just be fixed to call virtio_set_status?
>>
>> This part is ugly.  The problem is that virtio_reset() calls
>> virtio_set_status(vdev, 0) but doesn't give the transport binding a
>> chance to clean up after the virtio device has cleaned up.  Since
>> virtio-net will spot status=0 and deassign its host notifier, we need
>> to perform our own clean up after vhost.
>>
>> What makes this slightly less of a hack is the fact that virtio-pci.c
>> was already causing virtio_set_status(vdev, 0) to be invoked twice
>> during reset.  When 0 is written to the VIRTIO_PCI_STATUS register, we
>> do virtio_set_status(proxy->vdev, val & 0xFF) and then
>> virtio_reset(proxy->vdev).  So the status byte callback already gets
>> invoked twice today.
>>
>> I've just split this out into virtio_pci_reset_vdev() and (ab)used it
>> to correctly clean up virtqueue ioeventfd.
>>
>> The alternative is to add another callback from virtio.c so we are
>> notified after the vdev's reset callback has finished.
>
> Oh, likely not worth it. Maybe put the above explanation in the comment.
> Will this go away now that you move to set notifiers on status write?

For v3 I have switched to a bindings callback.  I wish it weren't
necessary, but the only other ways I can think of to catch status
writes are hacks that depend too heavily on side effects.
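
The rough shape of that callback would be something like the following
(names are made up here; see the v3 series for the real interface):

    /* Hypothetical extra hook in VirtIOBindings: */
    void (*vdev_status_changed)(void *opaque, uint8_t status);

    /* virtio_set_status() would invoke it after updating vdev->status,
     * so virtio-pci can assign/deassign its ioeventfd host notifiers on
     * every status transition, including reset to 0: */
    if (vdev->binding->vdev_status_changed) {
        vdev->binding->vdev_status_changed(vdev->binding_opaque,
                                           vdev->status);
    }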

Stefan
Avi Kivity Nov. 14, 2010, 10:34 a.m. UTC | #10
On 11/12/2010 11:20 AM, Stefan Hajnoczi wrote:
> >  Who guarantees that less common virtio-blk and virtio-net guest drivers
> >  for non-Linux OSes are fine with it?  Maybe you should add a feature flag
> >  that the guest has to ACK to enable it.
>
> Virtio-blk and virtio-net are fine.  Both of those devices are
> expected to operate asynchronously.  SeaBIOS and gPXE virtio-net
> drivers spin but they expect to and it is okay in those environments.
> They already burn CPU today.
>
> Virtio-console expects synchronous virtqueue kick.  In Linux,
> virtio_console.c __send_control_msg() and send_buf() will spin.  Qemu
> userspace is able to complete those requests synchronously so that the
> guest never actually burns CPU (e.g.
> hw/virtio-serial-bus.c:send_control_msg()).  I don't want to burn CPU
> in places where we previously didn't.

This is a horrible bug.  virtio is an asynchronous API.  Some hypervisor 
implementations cannot even provide synchronous notifications.

> It's good that QEMU can decide whether or not to handle virtqueue kick
> in the vcpu thread.  For high performance asynchronous devices like
> virtio-net and virtio-blk it makes sense to use ioeventfd.  For others
> it may not be useful.  I'm not sure a feature bit that exposes this
> detail to the guest would be useful.

The guest should always assume that virtio devices are asynchronous.
Stefan Hajnoczi Nov. 14, 2010, 10:37 a.m. UTC | #11
On Sun, Nov 14, 2010 at 10:34 AM, Avi Kivity <avi@redhat.com> wrote:
> On 11/12/2010 11:20 AM, Stefan Hajnoczi wrote:
>>
>> >  Who guarantees that less common virtio-blk and virtio-net guest drivers
>> >  for non-Linux OSes are fine with it?  Maybe you should add a feature
>> > flag
>> >  that the guest has to ACK to enable it.
>>
>> Virtio-blk and virtio-net are fine.  Both of those devices are
>> expected to operate asynchronously.  SeaBIOS and gPXE virtio-net
>> drivers spin but they expect to and it is okay in those environments.
>> They already burn CPU today.
>>
>> Virtio-console expects synchronous virtqueue kick.  In Linux,
>> virtio_console.c __send_control_msg() and send_buf() will spin.  Qemu
>> userspace is able to complete those requests synchronously so that the
>> guest never actually burns CPU (e.g.
>> hw/virtio-serial-bus.c:send_control_msg()).  I don't want to burn CPU
>> in places where we previously didn't.
>
> This is a horrible bug.  virtio is an asynchronous API.  Some hypervisor
> implementations cannot even provide synchronous notifications.
>
>> It's good that QEMU can decide whether or not to handle virtqueue kick
>> in the vcpu thread.  For high performance asynchronous devices like
>> virtio-net and virtio-blk it makes sense to use ioeventfd.  For others
>> it may not be useful.  I'm not sure a feature bit that exposes this
>> detail to the guest would be useful.
>
> The guest should always assume that virtio devices are asynchronous.

I agree, but let's enable virtio-ioeventfd carefully because bad code
is out there.

Stefan
Avi Kivity Nov. 14, 2010, 11:05 a.m. UTC | #12
On 11/14/2010 12:37 PM, Stefan Hajnoczi wrote:
> On Sun, Nov 14, 2010 at 10:34 AM, Avi Kivity<avi@redhat.com>  wrote:
> >  On 11/12/2010 11:20 AM, Stefan Hajnoczi wrote:
> >>
> >>  >    Who guarantees that less common virtio-blk and virtio-net guest drivers
> >>  >    for non-Linux OSes are fine with it?  Maybe you should add a feature
> >>  >  flag
> >>  >    that the guest has to ACK to enable it.
> >>
> >>  Virtio-blk and virtio-net are fine.  Both of those devices are
> >>  expected to operate asynchronously.  SeaBIOS and gPXE virtio-net
> >>  drivers spin but they expect to and it is okay in those environments.
> >>  They already burn CPU today.
> >>
> >>  Virtio-console expects synchronous virtqueue kick.  In Linux,
> >>  virtio_console.c __send_control_msg() and send_buf() will spin.  Qemu
> >>  userspace is able to complete those requests synchronously so that the
> >>  guest never actually burns CPU (e.g.
> >>  hw/virtio-serial-bus.c:send_control_msg()).  I don't want to burn CPU
> >>  in places where we previously didn't.
> >
> >  This is a horrible bug.  virtio is an asynchronous API.  Some hypervisor
> >  implementations cannot even provide synchronous notifications.
> >
> >>  It's good that QEMU can decide whether or not to handle virtqueue kick
> >>  in the vcpu thread.  For high performance asynchronous devices like
> >>  virtio-net and virtio-blk it makes sense to use ioeventfd.  For others
> >>  it may not be useful.  I'm not sure a feature bit that exposes this
> >>  detail to the guest would be useful.
> >
> >  The guest should always assume that virtio devices are asynchronous.
>
> I agree, but let's enable virtio-ioeventfd carefully because bad code
> is out there.

Sure.  Note as long as the thread waiting on ioeventfd doesn't consume 
too much cpu, it will awaken quickly and we won't have the "transaction 
per timeslice" effect.

btw, what about virtio-blk with linux-aio?  Have you benchmarked that 
with and without ioeventfd?
Avi Kivity Nov. 14, 2010, 12:19 p.m. UTC | #13
On 11/14/2010 01:05 PM, Avi Kivity wrote:
>> I agree, but let's enable virtio-ioeventfd carefully because bad code
>> is out there.
>
>
> Sure.  Note as long as the thread waiting on ioeventfd doesn't consume 
> too much cpu, it will awaken quickly and we won't have the 
> "transaction per timeslice" effect.
>
> btw, what about virtio-blk with linux-aio?  Have you benchmarked that 
> with and without ioeventfd?
>

And, what about efficiency?  As in bits/cycle?
Stefan Hajnoczi Nov. 15, 2010, 11:20 a.m. UTC | #14
On Sun, Nov 14, 2010 at 12:19 PM, Avi Kivity <avi@redhat.com> wrote:
> On 11/14/2010 01:05 PM, Avi Kivity wrote:
>>>
>>> I agree, but let's enable virtio-ioeventfd carefully because bad code
>>> is out there.
>>
>>
>> Sure.  Note as long as the thread waiting on ioeventfd doesn't consume too
>> much cpu, it will awaken quickly and we won't have the "transaction per
>> timeslice" effect.
>>
>> btw, what about virtio-blk with linux-aio?  Have you benchmarked that with
>> and without ioeventfd?
>>
>
> And, what about efficiency?  As in bits/cycle?

We are running benchmarks with this latest patch and will report results.

Stefan
Stefan Hajnoczi Dec. 1, 2010, 11:44 a.m. UTC | #15
On Mon, Nov 15, 2010 at 11:20 AM, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Sun, Nov 14, 2010 at 12:19 PM, Avi Kivity <avi@redhat.com> wrote:
>> On 11/14/2010 01:05 PM, Avi Kivity wrote:
>>>>
>>>> I agree, but let's enable virtio-ioeventfd carefully because bad code
>>>> is out there.
>>>
>>>
>>> Sure.  Note as long as the thread waiting on ioeventfd doesn't consume too
>>> much cpu, it will awaken quickly and we won't have the "transaction per
>>> timeslice" effect.
>>>
>>> btw, what about virtio-blk with linux-aio?  Have you benchmarked that with
>>> and without ioeventfd?
>>>
>>
>> And, what about efficiency?  As in bits/cycle?
>
> We are running benchmarks with this latest patch and will report results.

Full results here (thanks to Khoa Huynh):

http://wiki.qemu.org/Features/VirtioIoeventfd

The host CPU utilization is scaled to 16 CPUs so a 2-3% reduction is
actually in the 32-48% range for a single CPU.

The guest CPU utilization numbers include an efficiency metric: %vcpu
per MB/sec.  Here we see significant improvements too.  Guests that
previously couldn't get more CPU work done now have regained some
breathing space.

Stefan
Avi Kivity Dec. 1, 2010, 12:30 p.m. UTC | #16
On 12/01/2010 01:44 PM, Stefan Hajnoczi wrote:
> >>
> >>  And, what about efficiency?  As in bits/cycle?
> >
> >  We are running benchmarks with this latest patch and will report results.
>
> Full results here (thanks to Khoa Huynh):
>
> http://wiki.qemu.org/Features/VirtioIoeventfd
>
> The host CPU utilization is scaled to 16 CPUs so a 2-3% reduction is
> actually in the 32-48% range for a single CPU.
>
> The guest CPU utilization numbers include an efficiency metric: %vcpu
> per MB/sec.  Here we see significant improvements too.  Guests that
> previously couldn't get more CPU work done now have regained some
> breathing space.

Thanks for those numbers.  The guest improvements were expected, but the 
host numbers surprised me.  Do you have an explanation as to why total 
host load should decrease?

Seems to me the host is doing more work, and doing it less efficiently 
(by ping-ponging requests to another thread).

In any case, looks like good improvement with no downsides.
Stefan Hajnoczi Dec. 1, 2010, 9:34 p.m. UTC | #17
On Wed, Dec 1, 2010 at 12:30 PM, Avi Kivity <avi@redhat.com> wrote:
> On 12/01/2010 01:44 PM, Stefan Hajnoczi wrote:
>>
>> >>
>> >>  And, what about efficiency?  As in bits/cycle?
>> >
>> >  We are running benchmarks with this latest patch and will report
>> > results.
>>
>> Full results here (thanks to Khoa Huynh):
>>
>> http://wiki.qemu.org/Features/VirtioIoeventfd
>>
>> The host CPU utilization is scaled to 16 CPUs so a 2-3% reduction is
>> actually in the 32-48% range for a single CPU.
>>
>> The guest CPU utilization numbers include an efficiency metric: %vcpu
>> per MB/sec.  Here we see significant improvements too.  Guests that
>> previously couldn't get more CPU work done now have regained some
>> breathing space.
>
> Thanks for those numbers.  The guest improvements were expected, but the
> host numbers surprised me.  Do you have an explanation as to why total host
> load should decrease?

The first vcpu does virtqueue kick - it holds the guest driver
vblk->lock across kick.  Before this kick completes a second vcpu
tries to acquire vblk->lock, finds it is contended, and spins.  So
we're burning CPU due to the long vblk->lock hold times.

With virtio-ioeventfd those kick times are reduced and there is less
contention on vblk->lock.
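
Schematically, the guest-side path being described looks like this
(simplified, not the literal Linux virtio_blk code):

    /* vcpu A, request submission: */
    spin_lock_irqsave(&vblk->lock, flags);
    /* ... add request descriptors to the virtqueue ... */
    virtqueue_kick(vblk->vq);    /* pio write -> exit; the long part */
    spin_unlock_irqrestore(&vblk->lock, flags);

    /* vcpu B, at the same time: */
    spin_lock_irqsave(&vblk->lock, flags);   /* spins while A is in the kick */

With ioeventfd the kick exit returns as soon as the eventfd is signalled,
so the lock hold time on vcpu A shrinks and vcpu B spins far less.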

Stefan
Avi Kivity Dec. 6, 2010, 3:09 p.m. UTC | #18
On 12/01/2010 11:34 PM, Stefan Hajnoczi wrote:
> >>  The guest CPU utilization numbers include an efficiency metric: %vcpu
> >>  per MB/sec.  Here we see significant improvements too.  Guests that
> >>  previously couldn't get more CPU work done now have regained some
> >>  breathing space.
> >
> >  Thanks for those numbers.  The guest improvements were expected, but the
> >  host numbers surprised me.  Do you have an explanation as to why total host
> >  load should decrease?
>
> The first vcpu does virtqueue kick - it holds the guest driver
> vblk->lock across kick.  Before this kick completes a second vcpu
> tries to acquire vblk->lock, finds it is contended, and spins.  So
> we're burning CPU due to the long vblk->lock hold times.
>
> With virtio-ioeventfd those kick times are reduced and there is less
> contention on vblk->lock.

Makes sense.

Patch

diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index 549118d..436fc59 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -83,6 +83,10 @@ 
 /* Flags track per-device state like workarounds for quirks in older guests. */
 #define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)
 
+/* Performance improves when virtqueue kick processing is decoupled from the
+ * vcpu thread using ioeventfd for some devices. */
+#define VIRTIO_PCI_FLAG_USE_IOEVENTFD   (1 << 1)
+
 /* QEMU doesn't strictly need write barriers since everything runs in
  * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
  * KVM or if kqemu gets SMP support.
@@ -179,12 +183,108 @@  static int virtio_pci_load_queue(void * opaque, int n, QEMUFile *f)
     return 0;
 }
 
+static int virtio_pci_set_host_notifier_ioeventfd(VirtIOPCIProxy *proxy, int n, bool assign)
+{
+    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
+    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+    int r;
+    if (assign) {
+        r = event_notifier_init(notifier, 1);
+        if (r < 0) {
+            return r;
+        }
+        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
+                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
+                                       n, assign);
+        if (r < 0) {
+            event_notifier_cleanup(notifier);
+        }
+    } else {
+        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
+                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
+                                       n, assign);
+        if (r < 0) {
+            return r;
+        }
+        event_notifier_cleanup(notifier);
+    }
+    return r;
+}
+
+static void virtio_pci_host_notifier_read(void *opaque)
+{
+    VirtQueue *vq = opaque;
+    EventNotifier *n = virtio_queue_get_host_notifier(vq);
+    if (event_notifier_test_and_clear(n)) {
+        virtio_queue_notify_vq(vq);
+    }
+}
+
+static void virtio_pci_set_host_notifier_fd_handler(VirtIOPCIProxy *proxy, int n, bool assign)
+{
+    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
+    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+    if (assign) {
+        qemu_set_fd_handler(event_notifier_get_fd(notifier),
+                            virtio_pci_host_notifier_read, NULL, vq);
+    } else {
+        qemu_set_fd_handler(event_notifier_get_fd(notifier),
+                            NULL, NULL, NULL);
+    }
+}
+
+static int virtio_pci_set_host_notifiers(VirtIOPCIProxy *proxy, bool assign)
+{
+    int n, r;
+
+    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+        if (!virtio_queue_get_num(proxy->vdev, n)) {
+            continue;
+        }
+
+        if (assign) {
+            r = virtio_pci_set_host_notifier_ioeventfd(proxy, n, true);
+            if (r < 0) {
+                goto assign_error;
+            }
+
+            virtio_pci_set_host_notifier_fd_handler(proxy, n, true);
+        } else {
+            virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
+            virtio_pci_set_host_notifier_ioeventfd(proxy, n, false);
+        }
+    }
+    return 0;
+
+assign_error:
+    proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
+    while (--n >= 0) {
+        virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
+        virtio_pci_set_host_notifier_ioeventfd(proxy, n, false);
+    }
+    return r;
+}
+
+static void virtio_pci_reset_vdev(VirtIOPCIProxy *proxy)
+{
+    /* Poke virtio device so it deassigns its host notifiers (if any) */
+    virtio_set_status(proxy->vdev, 0);
+
+    /* Now safely deassign our own host notifiers */
+    if (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) {
+        virtio_pci_set_host_notifiers(proxy, false);
+    }
+
+    virtio_reset(proxy->vdev);
+    msix_unuse_all_vectors(&proxy->pci_dev);
+}
+
 static void virtio_pci_reset(DeviceState *d)
 {
     VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev);
-    virtio_reset(proxy->vdev);
+    virtio_pci_reset_vdev(proxy);
     msix_reset(&proxy->pci_dev);
-    proxy->flags = 0;
+    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
 }
 
 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
@@ -209,11 +309,10 @@  static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
     case VIRTIO_PCI_QUEUE_PFN:
         pa = (target_phys_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
         if (pa == 0) {
-            virtio_reset(proxy->vdev);
-            msix_unuse_all_vectors(&proxy->pci_dev);
-        }
-        else
+            virtio_pci_reset_vdev(proxy);
+        } else {
             virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
+        }
         break;
     case VIRTIO_PCI_QUEUE_SEL:
         if (val < VIRTIO_PCI_QUEUE_MAX)
@@ -223,10 +322,16 @@  static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
         virtio_queue_notify(vdev, val);
         break;
     case VIRTIO_PCI_STATUS:
-        virtio_set_status(vdev, val & 0xFF);
-        if (vdev->status == 0) {
-            virtio_reset(proxy->vdev);
-            msix_unuse_all_vectors(&proxy->pci_dev);
+        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
+            !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+            (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD)) {
+            virtio_pci_set_host_notifiers(proxy, true);
+        }
+
+        if (val & 0xFF) {
+            virtio_set_status(vdev, val & 0xFF);
+        } else {
+            virtio_pci_reset_vdev(proxy);
         }
 
         /* Linux before 2.6.34 sets the device as OK without enabling
@@ -480,30 +585,12 @@  assign_error:
 static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
 {
     VirtIOPCIProxy *proxy = opaque;
-    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
-    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
-    int r;
-    if (assign) {
-        r = event_notifier_init(notifier, 1);
-        if (r < 0) {
-            return r;
-        }
-        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
-                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
-                                       n, assign);
-        if (r < 0) {
-            event_notifier_cleanup(notifier);
-        }
+    if (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) {
+        virtio_pci_set_host_notifier_fd_handler(proxy, n, !assign);
+        return 0;
     } else {
-        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
-                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
-                                       n, assign);
-        if (r < 0) {
-            return r;
-        }
-        event_notifier_cleanup(notifier);
+        return virtio_pci_set_host_notifier_ioeventfd(proxy, n, assign);
     }
-    return r;
 }
 
 static const VirtIOBindings virtio_pci_bindings = {
@@ -702,6 +789,8 @@  static PCIDeviceInfo virtio_info[] = {
         .qdev.props = (Property[]) {
             DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
             DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, block),
+            DEFINE_PROP_UINT32("flags", VirtIOPCIProxy, flags,
+                               VIRTIO_PCI_FLAG_USE_IOEVENTFD),
             DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
             DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
             DEFINE_PROP_END_OF_LIST(),
@@ -714,6 +803,8 @@  static PCIDeviceInfo virtio_info[] = {
         .exit       = virtio_net_exit_pci,
         .romfile    = "pxe-virtio.bin",
         .qdev.props = (Property[]) {
+            DEFINE_PROP_UINT32("flags", VirtIOPCIProxy, flags,
+                               VIRTIO_PCI_FLAG_USE_IOEVENTFD),
             DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
             DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
             DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
diff --git a/hw/virtio.c b/hw/virtio.c
index a2a657e..f588e29 100644
--- a/hw/virtio.c
+++ b/hw/virtio.c
@@ -582,6 +582,11 @@  void virtio_queue_notify(VirtIODevice *vdev, int n)
     }
 }
 
+void virtio_queue_notify_vq(VirtQueue *vq)
+{
+    virtio_queue_notify(vq->vdev, vq - vq->vdev->vq);
+}
+
 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
 {
     return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
diff --git a/hw/virtio.h b/hw/virtio.h
index 02fa312..5ae521c 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -219,5 +219,6 @@  void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+void virtio_queue_notify_vq(VirtQueue *vq);
 void virtio_irq(VirtQueue *vq);
 #endif