
[v5,2/6] vdpa: Allocate SVQ unconditionally

Message ID 20221011104154.1209338-3-eperezma@redhat.com
State New
Series ASID support in vhost-vdpa net

Commit Message

Eugenio Perez Martin Oct. 11, 2022, 10:41 a.m. UTC
SVQ may or may not run in a device, depending on runtime conditions (for
example, whether the device can move CVQ to its own group).

Allocate the resources unconditionally, and decide later whether to use
them or not.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-vdpa.c | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)
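
As a reading aid, this is roughly what vhost_vdpa_init_svq looks like with
the change applied. It is reconstructed from the hunks below rather than
copied from the tree: the shadow_vqs declaration and the Error **errp
parameter are inferred from the diff context, and the SVQ feature checks
between the two hunks (untouched by this patch) are elided.

    static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
                                   Error **errp)
    {
        g_autoptr(GPtrArray) shadow_vqs = NULL;  /* declaration inferred */

        /* Always build the shadow virtqueue array, one entry per virtqueue. */
        shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
        for (unsigned n = 0; n < hdev->nvqs; ++n) {
            g_autoptr(VhostShadowVirtqueue) svq;

            svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
                                v->shadow_vq_ops_opaque);
            if (unlikely(!svq)) {
                error_setg(errp, "Cannot create svq %u", n);
                return -1;
            }
            g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
        }
        v->shadow_vqs = g_steal_pointer(&shadow_vqs);

        /* Only now decide whether the shadow virtqueues are actually used. */
        if (!v->shadow_vqs_enabled) {
            return 0;
        }

        /* ... SVQ feature checks elided, unchanged by this patch ... */
        return 0;
    }

The key point is that the allocation no longer depends on
shadow_vqs_enabled, which is why the NULL check in vhost_vdpa_svq_cleanup()
can be dropped.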

Comments

Michael S. Tsirkin Oct. 31, 2022, 8:20 a.m. UTC | #1
On Tue, Oct 11, 2022 at 12:41:50PM +0200, Eugenio Pérez wrote:
> SVQ may or may not run in a device, depending on runtime conditions (for
> example, whether the device can move CVQ to its own group).
> 
> Allocate the resources unconditionally, and decide later whether to use
> them or not.
> 
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>

I applied this for now, but I really dislike that we are wasting
resources like this.

Can I just drop this patch from the series? It looks like things
will just work anyway ...

I know, when one works on a feature it seems like everyone should
enable it - but the reality is qemu already works quite well for
most users and it is our responsibility to first do no harm.


> ---
>  hw/virtio/vhost-vdpa.c | 33 +++++++++++++++------------------
>  1 file changed, 15 insertions(+), 18 deletions(-)
> 
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 7f0ff4df5b..d966966131 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -410,6 +410,21 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
>      int r;
>      bool ok;
>  
> +    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> +    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> +        g_autoptr(VhostShadowVirtqueue) svq;
> +
> +        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> +                            v->shadow_vq_ops_opaque);
> +        if (unlikely(!svq)) {
> +            error_setg(errp, "Cannot create svq %u", n);
> +            return -1;
> +        }
> +        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> +    }
> +
> +    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
> +
>      if (!v->shadow_vqs_enabled) {
>          return 0;
>      }
> @@ -426,20 +441,6 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
>          return -1;
>      }
>  
> -    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> -    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> -        g_autoptr(VhostShadowVirtqueue) svq;
> -
> -        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> -                            v->shadow_vq_ops_opaque);
> -        if (unlikely(!svq)) {
> -            error_setg(errp, "Cannot create svq %u", n);
> -            return -1;
> -        }
> -        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> -    }
> -
> -    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
>      return 0;
>  }
>  
> @@ -580,10 +581,6 @@ static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
>      struct vhost_vdpa *v = dev->opaque;
>      size_t idx;
>  
> -    if (!v->shadow_vqs) {
> -        return;
> -    }
> -
>      for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
>          vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
>      }
> -- 
> 2.31.1
Eugenio Perez Martin Oct. 31, 2022, 11:56 a.m. UTC | #2
On Mon, Oct 31, 2022 at 9:21 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, Oct 11, 2022 at 12:41:50PM +0200, Eugenio Pérez wrote:
> > SVQ may or may not run in a device, depending on runtime conditions (for
> > example, whether the device can move CVQ to its own group).
> >
> > Allocate the resources unconditionally, and decide later whether to use
> > them or not.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
>
> I applied this for now, but I really dislike that we are wasting
> resources like this.
>
> Can I just drop this patch from the series? It looks like things
> will just work anyway ...
>

Simply dropping this patch will not work, because the new code expects
the SVQ vrings to be already allocated. But that is doable with more work.
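
To make that concrete, a minimal sketch of what such extra work could look
like, assuming callers are taught to tolerate a missing array; the helper
name is hypothetical and not part of this series:

    static VhostShadowVirtqueue *vhost_vdpa_svq_get(struct vhost_vdpa *v,
                                                    unsigned idx)
    {
        /*
         * Hypothetical accessor: instead of assuming v->shadow_vqs was
         * allocated at init time, callers get NULL back when SVQ was
         * never set up and skip the shadow path.
         */
        if (!v->shadow_vqs || idx >= v->shadow_vqs->len) {
            return NULL;
        }
        return g_ptr_array_index(v->shadow_vqs, idx);
    }

Call sites such as vhost_vdpa_svq_cleanup() would then keep the NULL check
that this patch removes.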

> I know, when one works on a feature it seems like everyone should
> enable it - but the reality is qemu already works quite well for
> most users and it is our responsibility to first do no harm.
>

I agree, but then it is better to drop this series entirely for this
merge window. I think it is justified to add it at the beginning of
the next merge window, and to give more time for testing and adding
more features actually.

However, I think shadow CVQ should start by default as long as the
device has the right set of both virtio and vdpa features. Otherwise,
we need another cmdline parameter, something like x-cvq-svq, and the
update of other layers like libvirt.
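
A rough sketch of the kind of gating meant here, purely illustrative: the
function name is made up and the exact feature bits would be up to the
series, but the idea is to shadow CVQ by default only when the device
offers a control virtqueue and the vdpa backend can isolate it in its own
ASID.

    /*
     * Hypothetical check, not code from this series: shadow CVQ by default
     * only when the device offers a control vq and the backend can place
     * it in its own address space (ASID).
     */
    static bool vhost_vdpa_net_cvq_shadow_by_default(uint64_t dev_features,
                                                     uint64_t backend_features)
    {
        return (dev_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) &&
               (backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID));
    }

With a check like this, no new x-cvq-svq parameter (and no libvirt update)
would be needed for the common case.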

Thanks!

>
> > ---
> >  hw/virtio/vhost-vdpa.c | 33 +++++++++++++++------------------
> >  1 file changed, 15 insertions(+), 18 deletions(-)
> >
> > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > index 7f0ff4df5b..d966966131 100644
> > --- a/hw/virtio/vhost-vdpa.c
> > +++ b/hw/virtio/vhost-vdpa.c
> > @@ -410,6 +410,21 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> >      int r;
> >      bool ok;
> >
> > +    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> > +    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > +        g_autoptr(VhostShadowVirtqueue) svq;
> > +
> > +        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > +                            v->shadow_vq_ops_opaque);
> > +        if (unlikely(!svq)) {
> > +            error_setg(errp, "Cannot create svq %u", n);
> > +            return -1;
> > +        }
> > +        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> > +    }
> > +
> > +    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
> > +
> >      if (!v->shadow_vqs_enabled) {
> >          return 0;
> >      }
> > @@ -426,20 +441,6 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> >          return -1;
> >      }
> >
> > -    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> > -    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > -        g_autoptr(VhostShadowVirtqueue) svq;
> > -
> > -        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > -                            v->shadow_vq_ops_opaque);
> > -        if (unlikely(!svq)) {
> > -            error_setg(errp, "Cannot create svq %u", n);
> > -            return -1;
> > -        }
> > -        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> > -    }
> > -
> > -    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
> >      return 0;
> >  }
> >
> > @@ -580,10 +581,6 @@ static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
> >      struct vhost_vdpa *v = dev->opaque;
> >      size_t idx;
> >
> > -    if (!v->shadow_vqs) {
> > -        return;
> > -    }
> > -
> >      for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
> >          vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
> >      }
> > --
> > 2.31.1
>
Michael S. Tsirkin Oct. 31, 2022, 12:24 p.m. UTC | #3
On Mon, Oct 31, 2022 at 12:56:06PM +0100, Eugenio Perez Martin wrote:
> On Mon, Oct 31, 2022 at 9:21 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Tue, Oct 11, 2022 at 12:41:50PM +0200, Eugenio Pérez wrote:
> > > SVQ may or may not run in a device, depending on runtime conditions (for
> > > example, whether the device can move CVQ to its own group).
> > >
> > > Allocate the resources unconditionally, and decide later whether to use
> > > them or not.
> > >
> > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> >
> > I applied this for now, but I really dislike that we are wasting
> > resources like this.
> >
> > Can I just drop this patch from the series? It looks like things
> > will just work anyway ...
> >
> 
> Simply dropping this patch will not work, because the new code expects
> the SVQ vrings to be already allocated. But that is doable with more work.
> 
> > I know, when one works on a feature it seems like everyone should
> > enable it - but the reality is qemu already works quite well for
> > most users and it is our responsibility to first do no harm.
> >
> 
> I agree, but then it is better to drop this series entirely for this
> merge window. I think it is justified to add it at the beginning of
> the next merge window, and to give more time for testing and adding
> more features actually.

Not sure what "then" means. You tell me - should I drop it?

> However, I think shadow CVQ should start by default as long as the
> device has the right set of both virtio and vdpa features. Otherwise,
> we need another cmdline parameter, something like x-cvq-svq, and the
> update of other layers like libvirt.
> 
> Thanks!

OK maybe that is not too bad.


> >
> > > ---
> > >  hw/virtio/vhost-vdpa.c | 33 +++++++++++++++------------------
> > >  1 file changed, 15 insertions(+), 18 deletions(-)
> > >
> > > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > > index 7f0ff4df5b..d966966131 100644
> > > --- a/hw/virtio/vhost-vdpa.c
> > > +++ b/hw/virtio/vhost-vdpa.c
> > > @@ -410,6 +410,21 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> > >      int r;
> > >      bool ok;
> > >
> > > +    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> > > +    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > > +        g_autoptr(VhostShadowVirtqueue) svq;
> > > +
> > > +        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > > +                            v->shadow_vq_ops_opaque);
> > > +        if (unlikely(!svq)) {
> > > +            error_setg(errp, "Cannot create svq %u", n);
> > > +            return -1;
> > > +        }
> > > +        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> > > +    }
> > > +
> > > +    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
> > > +
> > >      if (!v->shadow_vqs_enabled) {
> > >          return 0;
> > >      }
> > > @@ -426,20 +441,6 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> > >          return -1;
> > >      }
> > >
> > > -    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> > > -    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > > -        g_autoptr(VhostShadowVirtqueue) svq;
> > > -
> > > -        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > > -                            v->shadow_vq_ops_opaque);
> > > -        if (unlikely(!svq)) {
> > > -            error_setg(errp, "Cannot create svq %u", n);
> > > -            return -1;
> > > -        }
> > > -        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> > > -    }
> > > -
> > > -    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
> > >      return 0;
> > >  }
> > >
> > > @@ -580,10 +581,6 @@ static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
> > >      struct vhost_vdpa *v = dev->opaque;
> > >      size_t idx;
> > >
> > > -    if (!v->shadow_vqs) {
> > > -        return;
> > > -    }
> > > -
> > >      for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
> > >          vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
> > >      }
> > > --
> > > 2.31.1
> >
Eugenio Perez Martin Oct. 31, 2022, 12:34 p.m. UTC | #4
On Mon, Oct 31, 2022 at 1:25 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Mon, Oct 31, 2022 at 12:56:06PM +0100, Eugenio Perez Martin wrote:
> > On Mon, Oct 31, 2022 at 9:21 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Tue, Oct 11, 2022 at 12:41:50PM +0200, Eugenio Pérez wrote:
> > > > SVQ may or may not run in a device, depending on runtime conditions (for
> > > > example, whether the device can move CVQ to its own group).
> > > >
> > > > Allocate the resources unconditionally, and decide later whether to use
> > > > them or not.
> > > >
> > > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > >
> > > I applied this for now, but I really dislike that we are wasting
> > > resources like this.
> > >
> > > Can I just drop this patch from the series? It looks like things
> > > will just work anyway ...
> > >
> >
> > Simply dropping this patch will not work, because the new code expects
> > the SVQ vrings to be already allocated. But that is doable with more work.
> >
> > > I know, when one works on a feature it seems like everyone should
> > > enable it - but the reality is qemu already works quite well for
> > > most users and it is our responsibility to first do no harm.
> > >
> >
> > I agree, but then it is better to drop this series entirely for this
> > merge window. I think it is justified to add it at the beginning of
> > the next merge window, and to give more time for testing and adding
> > more features actually.
>
> Not sure what "then" means. You tell me - should I drop it?
>

Yes, I think it is better to drop it for this merge window, since it
is possible both to avoid allocating SVQ unconditionally and to improve
the conditions under which the shadow CVQ can be enabled.

> > However, I think shadow CVQ should start by default as long as the
> > device has the right set of both virtio and vdpa features. Otherwise,
> > we need another cmdline parameter, something like x-cvq-svq, and the
> > update of other layers like libvirt.
> >
> > Thanks!
>
> OK maybe that is not too bad.
>

So it would be preferable to add more parameters?

>
> > >
> > > > ---
> > > >  hw/virtio/vhost-vdpa.c | 33 +++++++++++++++------------------
> > > >  1 file changed, 15 insertions(+), 18 deletions(-)
> > > >
> > > > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > > > index 7f0ff4df5b..d966966131 100644
> > > > --- a/hw/virtio/vhost-vdpa.c
> > > > +++ b/hw/virtio/vhost-vdpa.c
> > > > @@ -410,6 +410,21 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> > > >      int r;
> > > >      bool ok;
> > > >
> > > > +    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> > > > +    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > > > +        g_autoptr(VhostShadowVirtqueue) svq;
> > > > +
> > > > +        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > > > +                            v->shadow_vq_ops_opaque);
> > > > +        if (unlikely(!svq)) {
> > > > +            error_setg(errp, "Cannot create svq %u", n);
> > > > +            return -1;
> > > > +        }
> > > > +        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> > > > +    }
> > > > +
> > > > +    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
> > > > +
> > > >      if (!v->shadow_vqs_enabled) {
> > > >          return 0;
> > > >      }
> > > > @@ -426,20 +441,6 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> > > >          return -1;
> > > >      }
> > > >
> > > > -    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> > > > -    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > > > -        g_autoptr(VhostShadowVirtqueue) svq;
> > > > -
> > > > -        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > > > -                            v->shadow_vq_ops_opaque);
> > > > -        if (unlikely(!svq)) {
> > > > -            error_setg(errp, "Cannot create svq %u", n);
> > > > -            return -1;
> > > > -        }
> > > > -        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> > > > -    }
> > > > -
> > > > -    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
> > > >      return 0;
> > > >  }
> > > >
> > > > @@ -580,10 +581,6 @@ static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
> > > >      struct vhost_vdpa *v = dev->opaque;
> > > >      size_t idx;
> > > >
> > > > -    if (!v->shadow_vqs) {
> > > > -        return;
> > > > -    }
> > > > -
> > > >      for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
> > > >          vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
> > > >      }
> > > > --
> > > > 2.31.1
> > >
>
Michael S. Tsirkin Oct. 31, 2022, 12:36 p.m. UTC | #5
On Mon, Oct 31, 2022 at 01:34:42PM +0100, Eugenio Perez Martin wrote:
> On Mon, Oct 31, 2022 at 1:25 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Mon, Oct 31, 2022 at 12:56:06PM +0100, Eugenio Perez Martin wrote:
> > > On Mon, Oct 31, 2022 at 9:21 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > >
> > > > On Tue, Oct 11, 2022 at 12:41:50PM +0200, Eugenio Pérez wrote:
> > > > > SVQ may or may not run in a device, depending on runtime conditions (for
> > > > > example, whether the device can move CVQ to its own group).
> > > > >
> > > > > Allocate the resources unconditionally, and decide later whether to use
> > > > > them or not.
> > > > >
> > > > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > > >
> > > > I applied this for now, but I really dislike that we are wasting
> > > > resources like this.
> > > >
> > > > Can I just drop this patch from the series? It looks like things
> > > > will just work anyway ...
> > > >
> > >
> > > Simply dropping this patch will not work, because the new code expects
> > > the SVQ vrings to be already allocated. But that is doable with more work.
> > >
> > > > I know, when one works on a feature it seems like everyone should
> > > > enable it - but the reality is qemu already works quite well for
> > > > most users and it is our responsibility to first do no harm.
> > > >
> > >
> > > I agree, but then it is better to drop this series entirely for this
> > > merge window. I think it is justified to add it at the beginning of
> > > the next merge window, and to give more time for testing and adding
> > > more features actually.
> >
> > Not sure what "then" means. You tell me - should I drop it?
> >
> 
> Yes, I think it is better to drop it for this merge window, since it
> is possible both to avoid allocating SVQ unconditionally and to improve
> the conditions under which the shadow CVQ can be enabled.

ok

> > > However, I think shadow CVQ should start by default as long as the
> > > device has the right set of both virtio and vdpa features. Otherwise,
> > > we need another cmdline parameter, something like x-cvq-svq, and the
> > > update of other layers like libvirt.
> > >
> > > Thanks!
> >
> > OK maybe that is not too bad.
> >
> 
> So it would be preferable to add more parameters?


Sorry, I meant: just for CVQ, it's not too bad to have SVQ always.

> >
> > > >
> > > > > ---
> > > > >  hw/virtio/vhost-vdpa.c | 33 +++++++++++++++------------------
> > > > >  1 file changed, 15 insertions(+), 18 deletions(-)
> > > > >
> > > > > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > > > > index 7f0ff4df5b..d966966131 100644
> > > > > --- a/hw/virtio/vhost-vdpa.c
> > > > > +++ b/hw/virtio/vhost-vdpa.c
> > > > > @@ -410,6 +410,21 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> > > > >      int r;
> > > > >      bool ok;
> > > > >
> > > > > +    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> > > > > +    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > > > > +        g_autoptr(VhostShadowVirtqueue) svq;
> > > > > +
> > > > > +        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > > > > +                            v->shadow_vq_ops_opaque);
> > > > > +        if (unlikely(!svq)) {
> > > > > +            error_setg(errp, "Cannot create svq %u", n);
> > > > > +            return -1;
> > > > > +        }
> > > > > +        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> > > > > +    }
> > > > > +
> > > > > +    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
> > > > > +
> > > > >      if (!v->shadow_vqs_enabled) {
> > > > >          return 0;
> > > > >      }
> > > > > @@ -426,20 +441,6 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> > > > >          return -1;
> > > > >      }
> > > > >
> > > > > -    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
> > > > > -    for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > > > > -        g_autoptr(VhostShadowVirtqueue) svq;
> > > > > -
> > > > > -        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > > > > -                            v->shadow_vq_ops_opaque);
> > > > > -        if (unlikely(!svq)) {
> > > > > -            error_setg(errp, "Cannot create svq %u", n);
> > > > > -            return -1;
> > > > > -        }
> > > > > -        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
> > > > > -    }
> > > > > -
> > > > > -    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
> > > > >      return 0;
> > > > >  }
> > > > >
> > > > > @@ -580,10 +581,6 @@ static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
> > > > >      struct vhost_vdpa *v = dev->opaque;
> > > > >      size_t idx;
> > > > >
> > > > > -    if (!v->shadow_vqs) {
> > > > > -        return;
> > > > > -    }
> > > > > -
> > > > >      for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
> > > > >          vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
> > > > >      }
> > > > > --
> > > > > 2.31.1
> > > >
> >

Patch

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 7f0ff4df5b..d966966131 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -410,6 +410,21 @@  static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
     int r;
     bool ok;
 
+    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
+    for (unsigned n = 0; n < hdev->nvqs; ++n) {
+        g_autoptr(VhostShadowVirtqueue) svq;
+
+        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
+                            v->shadow_vq_ops_opaque);
+        if (unlikely(!svq)) {
+            error_setg(errp, "Cannot create svq %u", n);
+            return -1;
+        }
+        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
+    }
+
+    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
+
     if (!v->shadow_vqs_enabled) {
         return 0;
     }
@@ -426,20 +441,6 @@  static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
         return -1;
     }
 
-    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
-    for (unsigned n = 0; n < hdev->nvqs; ++n) {
-        g_autoptr(VhostShadowVirtqueue) svq;
-
-        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
-                            v->shadow_vq_ops_opaque);
-        if (unlikely(!svq)) {
-            error_setg(errp, "Cannot create svq %u", n);
-            return -1;
-        }
-        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
-    }
-
-    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
     return 0;
 }
 
@@ -580,10 +581,6 @@  static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
     struct vhost_vdpa *v = dev->opaque;
     size_t idx;
 
-    if (!v->shadow_vqs) {
-        return;
-    }
-
     for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
         vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
     }