
[RFC,v9,14/23] vhost: add vhost_svq_poll

Message ID 20220706184008.1649478-15-eperezma@redhat.com
State New
Series Net Control VQ support in SVQ

Commit Message

Eugenio Perez Martin July 6, 2022, 6:39 p.m. UTC
It allows the Shadow Control VirtQueue to wait for the device to use the
commands that restore the net device state after a live migration.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.h |  1 +
 hw/virtio/vhost-shadow-virtqueue.c | 54 ++++++++++++++++++++++++++++--
 2 files changed, 52 insertions(+), 3 deletions(-)
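
For context, a rough sketch of how a caller might pair the two entry points
declared in the header below: inject a control command into the shadow
virtqueue and then block until the device has used it. The helper name
svq_send_and_wait() and its error handling are hypothetical, not part of this
series, and the sketch assumes the caller already guarantees the event loop is
not polling this SVQ concurrently, as the vhost_svq_poll() doc comment
requires.

#include "qemu/osdep.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"

/* Hypothetical caller: expose out_num + in_num descriptors and wait for them */
static int svq_send_and_wait(VhostShadowVirtqueue *svq, const struct iovec *iov,
                             size_t out_num, size_t in_num)
{
    ssize_t used;

    /* Make the buffers available to the device through the shadow vring */
    int r = vhost_svq_inject(svq, iov, out_num, in_num, NULL /* caller token */);
    if (unlikely(r != 0)) {
        return r;
    }

    /* Block until the device marks the injected buffers as used */
    used = vhost_svq_poll(svq);
    return used > 0 ? 0 : -EINVAL;
}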

Comments

Jason Wang July 11, 2022, 9:19 a.m. UTC | #1
On 2022/7/7 02:39, Eugenio Pérez wrote:
> It allows the Shadow Control VirtQueue to wait for the device to use the
> commands that restore the net device state after a live migration.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
>   hw/virtio/vhost-shadow-virtqueue.h |  1 +
>   hw/virtio/vhost-shadow-virtqueue.c | 54 ++++++++++++++++++++++++++++--
>   2 files changed, 52 insertions(+), 3 deletions(-)
>
> diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
> index d01d2370db..c8668fbdd6 100644
> --- a/hw/virtio/vhost-shadow-virtqueue.h
> +++ b/hw/virtio/vhost-shadow-virtqueue.h
> @@ -100,6 +100,7 @@ void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
>                            const VirtQueueElement *elem, uint32_t len);
>   int vhost_svq_inject(VhostShadowVirtqueue *svq, const struct iovec *iov,
>                        size_t out_num, size_t in_num, void *opaque);
> +ssize_t vhost_svq_poll(VhostShadowVirtqueue *svq);
>   void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
>   void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
>   void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
> diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
> index bd9e34b413..ed7f1d0bc9 100644
> --- a/hw/virtio/vhost-shadow-virtqueue.c
> +++ b/hw/virtio/vhost-shadow-virtqueue.c
> @@ -10,6 +10,8 @@
>   #include "qemu/osdep.h"
>   #include "hw/virtio/vhost-shadow-virtqueue.h"
>   
> +#include <glib/gpoll.h>
> +
>   #include "qemu/error-report.h"
>   #include "qapi/error.h"
>   #include "qemu/main-loop.h"
> @@ -490,10 +492,11 @@ void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
>       }
>   }
>   
> -static void vhost_svq_flush(VhostShadowVirtqueue *svq,
> -                            bool check_for_avail_queue)
> +static size_t vhost_svq_flush(VhostShadowVirtqueue *svq,
> +                              bool check_for_avail_queue)
>   {
>       VirtQueue *vq = svq->vq;
> +    size_t ret = 0;
>   
>       /* Forward as many used buffers as possible. */
>       do {
> @@ -510,7 +513,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
>                            "More than %u used buffers obtained in a %u size SVQ",
>                            i, svq->vring.num);
>                   virtqueue_flush(vq, svq->vring.num);
> -                return;
> +                return ret;
>               }
>   
>               svq_elem = vhost_svq_get_buf(svq, &len);
> @@ -520,6 +523,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
>   
>               elem = g_steal_pointer(&svq_elem.opaque);
>               virtqueue_fill(vq, elem, len, i++);
> +            ret++;
>           }
>   
>           virtqueue_flush(vq, i);
> @@ -533,6 +537,50 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
>               vhost_handle_guest_kick(svq);
>           }
>       } while (!vhost_svq_enable_notification(svq));
> +
> +    return ret;
> +}
> +
> +/**
> + * Poll the SVQ for device used buffers.
> + *
> + * This function races with the main event loop SVQ polling, so extra
> + * synchronization is needed.
> + *
> + * Return the number of descriptors read from the device.
> + */
> +ssize_t vhost_svq_poll(VhostShadowVirtqueue *svq)
> +{
> +    int fd = event_notifier_get_fd(&svq->hdev_call);
> +    GPollFD poll_fd = {
> +        .fd = fd,
> +        .events = G_IO_IN,
> +    };
> +    assert(fd >= 0);
> +    int r = g_poll(&poll_fd, 1, -1);


Any reason we can't simply (busy) poll the used ring here? It might
help to reduce the latency (and it is what the kernel driver uses).

Thanks


> +
> +    if (unlikely(r < 0)) {
> +        error_report("Cannot poll device call fd "G_POLLFD_FORMAT": (%d) %s",
> +                     poll_fd.fd, errno, g_strerror(errno));
> +        return -errno;
> +    }
> +
> +    if (r == 0) {
> +        return 0;
> +    }
> +
> +    if (unlikely(poll_fd.revents & ~(G_IO_IN))) {
> +        error_report(
> +            "Error polling device call fd "G_POLLFD_FORMAT": revents=%d",
> +            poll_fd.fd, poll_fd.revents);
> +        return -1;
> +    }
> +
> +    /*
> +     * Max return value of vhost_svq_flush is (uint16_t)-1, so it's safe to
> +     * convert to ssize_t.
> +     */
> +    return vhost_svq_flush(svq, false);
>   }
>   
>   /**
Eugenio Perez Martin July 11, 2022, 5:52 p.m. UTC | #2
On Mon, Jul 11, 2022 at 11:19 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2022/7/7 02:39, Eugenio Pérez wrote:
> > It allows the Shadow Control VirtQueue to wait for the device to use the
> > commands that restore the net device state after a live migration.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >   hw/virtio/vhost-shadow-virtqueue.h |  1 +
> >   hw/virtio/vhost-shadow-virtqueue.c | 54 ++++++++++++++++++++++++++++--
> >   2 files changed, 52 insertions(+), 3 deletions(-)
> >
> > diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
> > index d01d2370db..c8668fbdd6 100644
> > --- a/hw/virtio/vhost-shadow-virtqueue.h
> > +++ b/hw/virtio/vhost-shadow-virtqueue.h
> > @@ -100,6 +100,7 @@ void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
> >                            const VirtQueueElement *elem, uint32_t len);
> >   int vhost_svq_inject(VhostShadowVirtqueue *svq, const struct iovec *iov,
> >                        size_t out_num, size_t in_num, void *opaque);
> > +ssize_t vhost_svq_poll(VhostShadowVirtqueue *svq);
> >   void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
> >   void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
> >   void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
> > diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
> > index bd9e34b413..ed7f1d0bc9 100644
> > --- a/hw/virtio/vhost-shadow-virtqueue.c
> > +++ b/hw/virtio/vhost-shadow-virtqueue.c
> > @@ -10,6 +10,8 @@
> >   #include "qemu/osdep.h"
> >   #include "hw/virtio/vhost-shadow-virtqueue.h"
> >
> > +#include <glib/gpoll.h>
> > +
> >   #include "qemu/error-report.h"
> >   #include "qapi/error.h"
> >   #include "qemu/main-loop.h"
> > @@ -490,10 +492,11 @@ void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
> >       }
> >   }
> >
> > -static void vhost_svq_flush(VhostShadowVirtqueue *svq,
> > -                            bool check_for_avail_queue)
> > +static size_t vhost_svq_flush(VhostShadowVirtqueue *svq,
> > +                              bool check_for_avail_queue)
> >   {
> >       VirtQueue *vq = svq->vq;
> > +    size_t ret = 0;
> >
> >       /* Forward as many used buffers as possible. */
> >       do {
> > @@ -510,7 +513,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
> >                            "More than %u used buffers obtained in a %u size SVQ",
> >                            i, svq->vring.num);
> >                   virtqueue_flush(vq, svq->vring.num);
> > -                return;
> > +                return ret;
> >               }
> >
> >               svq_elem = vhost_svq_get_buf(svq, &len);
> > @@ -520,6 +523,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
> >
> >               elem = g_steal_pointer(&svq_elem.opaque);
> >               virtqueue_fill(vq, elem, len, i++);
> > +            ret++;
> >           }
> >
> >           virtqueue_flush(vq, i);
> > @@ -533,6 +537,50 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
> >               vhost_handle_guest_kick(svq);
> >           }
> >       } while (!vhost_svq_enable_notification(svq));
> > +
> > +    return ret;
> > +}
> > +
> > +/**
> > + * Poll the SVQ for device used buffers.
> > + *
> > + * This function races with the main event loop SVQ polling, so extra
> > + * synchronization is needed.
> > + *
> > + * Return the number of descriptors read from the device.
> > + */
> > +ssize_t vhost_svq_poll(VhostShadowVirtqueue *svq)
> > +{
> > +    int fd = event_notifier_get_fd(&svq->hdev_call);
> > +    GPollFD poll_fd = {
> > +        .fd = fd,
> > +        .events = G_IO_IN,
> > +    };
> > +    assert(fd >= 0);
> > +    int r = g_poll(&poll_fd, 1, -1);
>
>
> Any reason we can't simply (busy) poll the used ring here? It might
> help to reduce the latency (and it is what the kernel driver uses).
>

Yes, I'll change it to busy polling. I forgot to change it.

Thanks!

> Thanks
>
>
> > +
> > +    if (unlikely(r < 0)) {
> > +        error_report("Cannot poll device call fd "G_POLLFD_FORMAT": (%d) %s",
> > +                     poll_fd.fd, errno, g_strerror(errno));
> > +        return -errno;
> > +    }
> > +
> > +    if (r == 0) {
> > +        return 0;
> > +    }
> > +
> > +    if (unlikely(poll_fd.revents & ~(G_IO_IN))) {
> > +        error_report(
> > +            "Error polling device call fd "G_POLLFD_FORMAT": revents=%d",
> > +            poll_fd.fd, poll_fd.revents);
> > +        return -1;
> > +    }
> > +
> > +    /*
> > +     * Max return value of vhost_svq_flush is (uint16_t)-1, so it's safe to
> > +     * convert to ssize_t.
> > +     */
> > +    return vhost_svq_flush(svq, false);
> >   }
> >
> >   /**
>
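
As an illustration of the busy-polling alternative agreed on in the thread
above, here is a sketch of what vhost_svq_poll() could look like if it spun on
the used ring instead of blocking in g_poll(). It assumes the static helper
vhost_svq_more_used() already defined earlier in vhost-shadow-virtqueue.c, and
the 10-second bail-out is an arbitrary value chosen for the sketch, not
something specified in this thread.

ssize_t vhost_svq_poll(VhostShadowVirtqueue *svq)
{
    int64_t start_us = g_get_monotonic_time();

    do {
        if (vhost_svq_more_used(svq)) {
            /* At least one buffer was used by the device: flush it */
            return vhost_svq_flush(svq, false);
        }

        /* Give up after ~10 seconds so a stuck device cannot hang QEMU */
        if (unlikely(g_get_monotonic_time() - start_us > 10 * 1000 * 1000)) {
            return 0;
        }
    } while (true);
}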

Patch

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index d01d2370db..c8668fbdd6 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -100,6 +100,7 @@  void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
                          const VirtQueueElement *elem, uint32_t len);
 int vhost_svq_inject(VhostShadowVirtqueue *svq, const struct iovec *iov,
                      size_t out_num, size_t in_num, void *opaque);
+ssize_t vhost_svq_poll(VhostShadowVirtqueue *svq);
 void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
 void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
 void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index bd9e34b413..ed7f1d0bc9 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -10,6 +10,8 @@ 
 #include "qemu/osdep.h"
 #include "hw/virtio/vhost-shadow-virtqueue.h"
 
+#include <glib/gpoll.h>
+
 #include "qemu/error-report.h"
 #include "qapi/error.h"
 #include "qemu/main-loop.h"
@@ -490,10 +492,11 @@  void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
     }
 }
 
-static void vhost_svq_flush(VhostShadowVirtqueue *svq,
-                            bool check_for_avail_queue)
+static size_t vhost_svq_flush(VhostShadowVirtqueue *svq,
+                              bool check_for_avail_queue)
 {
     VirtQueue *vq = svq->vq;
+    size_t ret = 0;
 
     /* Forward as many used buffers as possible. */
     do {
@@ -510,7 +513,7 @@  static void vhost_svq_flush(VhostShadowVirtqueue *svq,
                          "More than %u used buffers obtained in a %u size SVQ",
                          i, svq->vring.num);
                 virtqueue_flush(vq, svq->vring.num);
-                return;
+                return ret;
             }
 
             svq_elem = vhost_svq_get_buf(svq, &len);
@@ -520,6 +523,7 @@  static void vhost_svq_flush(VhostShadowVirtqueue *svq,
 
             elem = g_steal_pointer(&svq_elem.opaque);
             virtqueue_fill(vq, elem, len, i++);
+            ret++;
         }
 
         virtqueue_flush(vq, i);
@@ -533,6 +537,50 @@  static void vhost_svq_flush(VhostShadowVirtqueue *svq,
             vhost_handle_guest_kick(svq);
         }
     } while (!vhost_svq_enable_notification(svq));
+
+    return ret;
+}
+
+/**
+ * Poll the SVQ for device used buffers.
+ *
+ * This function races with the main event loop SVQ polling, so extra
+ * synchronization is needed.
+ *
+ * Return the number of descriptors read from the device.
+ */
+ssize_t vhost_svq_poll(VhostShadowVirtqueue *svq)
+{
+    int fd = event_notifier_get_fd(&svq->hdev_call);
+    GPollFD poll_fd = {
+        .fd = fd,
+        .events = G_IO_IN,
+    };
+    assert(fd >= 0);
+    int r = g_poll(&poll_fd, 1, -1);
+
+    if (unlikely(r < 0)) {
+        error_report("Cannot poll device call fd "G_POLLFD_FORMAT": (%d) %s",
+                     poll_fd.fd, errno, g_strerror(errno));
+        return -errno;
+    }
+
+    if (r == 0) {
+        return 0;
+    }
+
+    if (unlikely(poll_fd.revents & ~(G_IO_IN))) {
+        error_report(
+            "Error polling device call fd "G_POLLFD_FORMAT": revents=%d",
+            poll_fd.fd, poll_fd.revents);
+        return -1;
+    }
+
+    /*
+     * Max return value of vhost_svq_flush is (uint16_t)-1, so it's safe to
+     * convert to ssize_t.
+     */
+    return vhost_svq_flush(svq, false);
 }
 
 /**