
[RFC,v9,21/23] vdpa: Add vhost_vdpa_start_control_svq

Message ID 20220706184008.1649478-22-eperezma@redhat.com
State New
Series Net Control VQ support in SVQ

Commit Message

Eugenio Perez Martin July 6, 2022, 6:40 p.m. UTC
As a first step we only enable the CVQ before the other virtqueues. Future
patches will add state restore.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 net/vhost-vdpa.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

Comments

Jason Wang July 12, 2022, 7:26 a.m. UTC | #1
在 2022/7/7 02:40, Eugenio Pérez 写道:
> As a first step we only enable CVQ first than others. Future patches add
> state restore.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
>   net/vhost-vdpa.c | 19 +++++++++++++++++++
>   1 file changed, 19 insertions(+)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index e415cc8de5..77d013833f 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -370,6 +370,24 @@ static CVQElement *vhost_vdpa_cvq_alloc_elem(VhostVDPAState *s,
>       return g_steal_pointer(&cvq_elem);
>   }
>   
> +static int vhost_vdpa_start_control_svq(VhostShadowVirtqueue *svq,
> +                                        void *opaque)
> +{
> +    struct vhost_vring_state state = {
> +        .index = virtio_get_queue_index(svq->vq),
> +        .num = 1,
> +    };
> +    VhostVDPAState *s = opaque;
> +    struct vhost_dev *dev = s->vhost_vdpa.dev;
> +    struct vhost_vdpa *v = dev->opaque;
> +    int r;
> +
> +    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> +
> +    r = ioctl(v->device_fd, VHOST_VDPA_SET_VRING_ENABLE, &state);
> +    return r < 0 ? -errno : r;
> +}
> +
>   /**
>    * iov_size with an upper limit. It's assumed UINT64_MAX is an invalid
>    * iov_size.
> @@ -554,6 +572,7 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
>       .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
>       .used_handler = vhost_vdpa_net_handle_ctrl_used,
>       .detach_handler = vhost_vdpa_net_handle_ctrl_detach,
> +    .start = vhost_vdpa_start_control_svq,
>   };


I wonder if vhost_net_start() would be a better place than here. It knows
all the virtqueues and can do whatever it wants; we would just need to make
the shadow virtqueue visible there?

Thanks


>   
>   static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
Eugenio Perez Martin July 17, 2022, 10:30 a.m. UTC | #2
On Tue, Jul 12, 2022 at 9:26 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> 在 2022/7/7 02:40, Eugenio Pérez 写道:
> > As a first step we only enable CVQ first than others. Future patches add
> > state restore.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >   net/vhost-vdpa.c | 19 +++++++++++++++++++
> >   1 file changed, 19 insertions(+)
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index e415cc8de5..77d013833f 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -370,6 +370,24 @@ static CVQElement *vhost_vdpa_cvq_alloc_elem(VhostVDPAState *s,
> >       return g_steal_pointer(&cvq_elem);
> >   }
> >
> > +static int vhost_vdpa_start_control_svq(VhostShadowVirtqueue *svq,
> > +                                        void *opaque)
> > +{
> > +    struct vhost_vring_state state = {
> > +        .index = virtio_get_queue_index(svq->vq),
> > +        .num = 1,
> > +    };
> > +    VhostVDPAState *s = opaque;
> > +    struct vhost_dev *dev = s->vhost_vdpa.dev;
> > +    struct vhost_vdpa *v = dev->opaque;
> > +    int r;
> > +
> > +    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> > +
> > +    r = ioctl(v->device_fd, VHOST_VDPA_SET_VRING_ENABLE, &state);
> > +    return r < 0 ? -errno : r;
> > +}
> > +
> >   /**
> >    * iov_size with an upper limit. It's assumed UINT64_MAX is an invalid
> >    * iov_size.
> > @@ -554,6 +572,7 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
> >       .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
> >       .used_handler = vhost_vdpa_net_handle_ctrl_used,
> >       .detach_handler = vhost_vdpa_net_handle_ctrl_detach,
> > +    .start = vhost_vdpa_start_control_svq,
> >   };
>
>
> I wonder if vhost_net_start() is something better than here. It knows
> all virtqueues and it can do whatever it wants, we just need to make
> shadow virtqueue visible there?
>

But this needs to be called after DRIVER_OK is set and before
VHOST_VRING_ENABLE.

I also think vhost_net_start is a better place, but to achieve that we
need to split vhost_vdpa_dev_start so that VHOST_VRING_ENABLE is called
after it, maybe through .vhost_set_vring_enable. Why wasn't it done that
way from the beginning?

After that, we need to modify the vhost_net_start sequence. Currently,
vhost_net calls VHOST_VRING_ENABLE right after vhost_dev_start for each
vhost_dev. Vdpa would instead need to call vhost_dev_start for each
device, then start the CVQ (vdpa_cvq_start), and only then call
.vhost_set_vring_enable for each device, as sketched below.
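
For illustration, a minimal standalone sketch of that ordering; the stub
types and helpers below (fake_vhost_dev, fake_dev_start, fake_cvq_start,
fake_vring_enable, fake_net_start) are placeholders invented for this
discussion, not QEMU API, and only model the proposed call order:

    #include <stddef.h>

    /* Stub standing in for a vhost_dev; it only records what happened. */
    struct fake_vhost_dev { int started; int enabled; };

    static int fake_dev_start(struct fake_vhost_dev *d)    { d->started = 1; return 0; }
    static int fake_cvq_start(struct fake_vhost_dev *cvq)  { cvq->enabled = 1; return 0; }
    static int fake_vring_enable(struct fake_vhost_dev *d) { d->enabled = 1; return 0; }

    /* Proposed vhost_net_start()-level sequence for vdpa:
     * 1) vhost_dev_start() on every device (ends with DRIVER_OK),
     * 2) start/enable the shadow control virtqueue,
     * 3) only then enable the remaining (data) vrings. */
    static int fake_net_start(struct fake_vhost_dev *devs, size_t n,
                              struct fake_vhost_dev *cvq)
    {
        int r;

        for (size_t i = 0; i < n; i++) {
            r = fake_dev_start(&devs[i]);
            if (r < 0) {
                return r;
            }
        }

        r = fake_cvq_start(cvq);   /* CVQ first, so state restore can go through it */
        if (r < 0) {
            return r;
        }

        for (size_t i = 0; i < n; i++) {
            r = fake_vring_enable(&devs[i]);   /* data vqs enabled after the CVQ */
            if (r < 0) {
                return r;
            }
        }
        return 0;
    }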

It's not a lot of code change, but I think it's safer to keep it
self-contained in vdpa for the moment; we can then move it to vhost_net
right away during the development cycle. If the vhost-user backend should
support this other sequence immediately, I'm ok with sending a new version
before Tuesday.

Thanks!
Eugenio Perez Martin July 17, 2022, 11 a.m. UTC | #3
On Sun, Jul 17, 2022 at 12:30 PM Eugenio Perez Martin
<eperezma@redhat.com> wrote:
>
> On Tue, Jul 12, 2022 at 9:26 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> >
> > 在 2022/7/7 02:40, Eugenio Pérez 写道:
> > > As a first step we only enable CVQ first than others. Future patches add
> > > state restore.
> > >
> > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > > ---
> > >   net/vhost-vdpa.c | 19 +++++++++++++++++++
> > >   1 file changed, 19 insertions(+)
> > >
> > > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > > index e415cc8de5..77d013833f 100644
> > > --- a/net/vhost-vdpa.c
> > > +++ b/net/vhost-vdpa.c
> > > @@ -370,6 +370,24 @@ static CVQElement *vhost_vdpa_cvq_alloc_elem(VhostVDPAState *s,
> > >       return g_steal_pointer(&cvq_elem);
> > >   }
> > >
> > > +static int vhost_vdpa_start_control_svq(VhostShadowVirtqueue *svq,
> > > +                                        void *opaque)
> > > +{
> > > +    struct vhost_vring_state state = {
> > > +        .index = virtio_get_queue_index(svq->vq),
> > > +        .num = 1,
> > > +    };
> > > +    VhostVDPAState *s = opaque;
> > > +    struct vhost_dev *dev = s->vhost_vdpa.dev;
> > > +    struct vhost_vdpa *v = dev->opaque;
> > > +    int r;
> > > +
> > > +    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> > > +
> > > +    r = ioctl(v->device_fd, VHOST_VDPA_SET_VRING_ENABLE, &state);
> > > +    return r < 0 ? -errno : r;
> > > +}
> > > +
> > >   /**
> > >    * iov_size with an upper limit. It's assumed UINT64_MAX is an invalid
> > >    * iov_size.
> > > @@ -554,6 +572,7 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
> > >       .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
> > >       .used_handler = vhost_vdpa_net_handle_ctrl_used,
> > >       .detach_handler = vhost_vdpa_net_handle_ctrl_detach,
> > > +    .start = vhost_vdpa_start_control_svq,
> > >   };
> >
> >
> > I wonder if vhost_net_start() is something better than here. It knows
> > all virtqueues and it can do whatever it wants, we just need to make
> > shadow virtqueue visible there?
> >
>
> But this needs to be called after the set of DRIVER_OK and before
> VHOST_VRING_ENABLE.
>
> I also think vhost_net_start is a better place, but to achieve it we
> need to split vhost_vdpa_dev_start to call VHOST_VRING_ENABLE after
> it. Maybe through .vhost_set_vring_enable? Why wasn't it done that way
> from the beginning?
>
> After that, we need to modify the vhost_net_start sequence. Currently,
> vhost_net is calling VHOST_VRING_ENABLE right after each vhost_dev
> vhost_dev_start. Vdpa would need to call vhost_dev_start for each
> device, and then call .vhost_set_vring_enable for each device again.
> And to add the vdpa_cvq_start in the middle.
>
> It's not a lot of code change but I think we're safer self containing
> it in vdpa at the moment, and then we can move to vhost_net
> immediately for the development cycle. If the vhost-user backend
> should support this other sequence immediately, I'm ok with sending a
> new version before Tuesday.
>

Moved to vhost_vdpa in the already sent RFC [1].

[1] https://lists.nongnu.org/archive/html/qemu-devel/2022-07/msg02856.html

Patch

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index e415cc8de5..77d013833f 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -370,6 +370,24 @@  static CVQElement *vhost_vdpa_cvq_alloc_elem(VhostVDPAState *s,
     return g_steal_pointer(&cvq_elem);
 }
 
+static int vhost_vdpa_start_control_svq(VhostShadowVirtqueue *svq,
+                                        void *opaque)
+{
+    struct vhost_vring_state state = {
+        .index = virtio_get_queue_index(svq->vq),
+        .num = 1,
+    };
+    VhostVDPAState *s = opaque;
+    struct vhost_dev *dev = s->vhost_vdpa.dev;
+    struct vhost_vdpa *v = dev->opaque;
+    int r;
+
+    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+
+    r = ioctl(v->device_fd, VHOST_VDPA_SET_VRING_ENABLE, &state);
+    return r < 0 ? -errno : r;
+}
+
 /**
  * iov_size with an upper limit. It's assumed UINT64_MAX is an invalid
  * iov_size.
@@ -554,6 +572,7 @@  static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
     .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
     .used_handler = vhost_vdpa_net_handle_ctrl_used,
     .detach_handler = vhost_vdpa_net_handle_ctrl_detach,
+    .start = vhost_vdpa_start_control_svq,
 };
 
 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,