diff mbox series

[RFC,v2,07/13] vdpa: delay set_vring_ready after DRIVER_OK

Message ID 20230112172434.760850-8-eperezma@redhat.com
State New
Headers show
Series Dynamically switch to vhost shadow virtqueues at vdpa net migration | expand

Commit Message

Eugenio Perez Martin Jan. 12, 2023, 5:24 p.m. UTC
To restore the device at the destination of a live migration we send the
commands through control virtqueue. For a device to read CVQ it must
have received the DRIVER_OK status bit.

However this opens a window where the device could start receiving
packets in rx queue 0 before it receives the RSS configuration. To avoid
that, we will not send vring_enable until all configuration is used by
the device.

To achieve this, delegate the sending of VHOST_VDPA_SET_VRING_ENABLE
to the vhost_set_vring_ready VhostOp.

Signed-off-by: Eugenio PĂ©rez <eperezma@redhat.com>
---
 hw/net/vhost_net.c     | 8 ++++++--
 hw/virtio/vhost-vdpa.c | 8 ++++++--
 2 files changed, 12 insertions(+), 4 deletions(-)
diff mbox series

Patch

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 3900599465..87938b4449 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -406,15 +406,19 @@  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
     }
 
     for (int j = 0; j < nvhosts; j++) {
+        int enable;
+
         if (j < data_queue_pairs) {
             peer = qemu_get_peer(ncs, j);
         } else {
             peer = qemu_get_peer(ncs, n->max_queue_pairs);
         }
 
-        if (peer->vring_enable) {
+        enable = net->nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA ||
+                 peer->vring_enable;
+        if (enable) {
             /* restore vring enable state */
-            r = vhost_set_vring_enable(peer, peer->vring_enable);
+            r = vhost_set_vring_enable(peer, enable);
 
             if (r < 0) {
                 goto err_start;
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 28a52ddc78..4296427a69 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -722,9 +722,13 @@  static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
     return idx;
 }
 
-static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
+static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev, int ready)
 {
     int i;
+
+    if (unlikely(!ready)) {
+        return -ENOTSUP;
+    }
     trace_vhost_vdpa_set_vring_ready(dev);
     for (i = 0; i < dev->nvqs; ++i) {
         struct vhost_vring_state state = {
@@ -1119,7 +1123,6 @@  static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
         if (unlikely(!ok)) {
             return -1;
         }
-        vhost_vdpa_set_vring_ready(dev);
     } else {
         vhost_vdpa_svqs_stop(dev);
         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
@@ -1324,6 +1327,7 @@  const VhostOps vdpa_ops = {
         .vhost_set_features = vhost_vdpa_set_features,
         .vhost_reset_device = vhost_vdpa_reset_device,
         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
+        .vhost_set_vring_enable = vhost_vdpa_set_vring_ready,
         .vhost_get_config  = vhost_vdpa_get_config,
         .vhost_set_config = vhost_vdpa_set_config,
         .vhost_requires_shm_log = NULL,