
[v9,11/12] vdpa: Add virtio-net mac address via CVQ at start

Message ID 20220819170048.3593487-12-eperezma@redhat.com
State New
Series NIC vhost-vdpa state restore via Shadow CVQ

Commit Message

Eugenio Perez Martin Aug. 19, 2022, 5 p.m. UTC
This is needed so the destination vdpa device sees the same state that the
guest set in the source.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
v9:
* Use guest acked features instead of device's.
* Constify vhost_vdpa and VirtIONet variables.
* Delete unneeded increment of cursor.

v8:
* Delete unneeded copy from device's in buffer.

v6:
* Map and unmap command buffers at the start and end of device usage.

v5:
* Rename s/start/load/
* Use independent NetClientInfo to only add load callback on cvq.
---
 net/vhost-vdpa.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
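
For reference, the control message that vhost_vdpa_net_load() places in the shadow
CVQ buffers follows the standard virtio-net control layout: a two-byte
virtio_net_ctrl_hdr (class VIRTIO_NET_CTRL_MAC, command VIRTIO_NET_CTRL_MAC_ADDR_SET)
followed by the 6-byte MAC in the out buffer, with the device answering with a single
virtio_net_ctrl_ack byte in the in buffer. Below is a minimal, self-contained sketch of
that layout; the struct and constants mirror the virtio spec (QEMU takes the real
definitions from the standard virtio headers), and the helper names
build_set_mac_cmd()/check_ack() are illustrative only, not part of the patch.

#include <stdint.h>
#include <string.h>

/* Local mirrors of the virtio-net control definitions (see the virtio spec);
 * QEMU uses the standard virtio headers instead of redefining these. */
struct virtio_net_ctrl_hdr {
    uint8_t class;                       /* VIRTIO_NET_CTRL_MAC */
    uint8_t cmd;                         /* VIRTIO_NET_CTRL_MAC_ADDR_SET */
};
typedef uint8_t virtio_net_ctrl_ack;

#define VIRTIO_NET_CTRL_MAC          1
#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
#define VIRTIO_NET_OK                0

/* Illustrative helper: lay out the SET_MAC command the same way
 * vhost_vdpa_net_load() does (header first, then the 6-byte MAC).
 * Returns the number of out-buffer bytes to expose to the device. */
static size_t build_set_mac_cmd(uint8_t *out, const uint8_t mac[6])
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = VIRTIO_NET_CTRL_MAC,
        .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
    };

    memcpy(out, &ctrl, sizeof(ctrl));
    memcpy(out + sizeof(ctrl), mac, 6);
    return sizeof(ctrl) + 6;
}

/* Illustrative helper: the device writes one status byte into the in buffer;
 * anything other than VIRTIO_NET_OK is reported as failure, as the patch does. */
static int check_ack(const uint8_t *in)
{
    return *(const virtio_net_ctrl_ack *)in != VIRTIO_NET_OK;
}

Since the CVQ command buffers are mapped for the whole device usage (see the v6
changelog entry), the load path only has to fill the out buffer and poll for the
single ack byte.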

Comments

Jason Wang Aug. 23, 2022, 6:41 a.m. UTC | #1
On 2022/8/20 01:00, Eugenio Pérez wrote:
> This is needed so the destination vdpa device sees the same state that the
> guest set in the source.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>


Acked-by: Jason Wang <jasowang@redhat.com>


> ---
> v9:
> * Use guest acked features instead of device's.
> * Constify vhost_vdpa and VirtIONet variables.
> * Delete unneeded increment of cursor.
>
> v8:
> * Delete unneeded copy from device's in buffer.
>
> v6:
> * Map and unmap command buffers at the start and end of device usage.
>
> v5:
> * Rename s/start/load/
> * Use independent NetClientInfo to only add load callback on cvq.
> ---
>   net/vhost-vdpa.c | 40 ++++++++++++++++++++++++++++++++++++++++
>   1 file changed, 40 insertions(+)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index ebf76d1034..8fad31a5fd 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -363,11 +363,51 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
>       return vhost_svq_poll(svq);
>   }
>   
> +static int vhost_vdpa_net_load(NetClientState *nc)
> +{
> +    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> +    const struct vhost_vdpa *v = &s->vhost_vdpa;
> +    const VirtIONet *n;
> +    uint64_t features;
> +
> +    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> +
> +    if (!v->shadow_vqs_enabled) {
> +        return 0;
> +    }
> +
> +    n = VIRTIO_NET(v->dev->vdev);
> +    features = n->parent_obj.guest_features;
> +    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
> +        const struct virtio_net_ctrl_hdr ctrl = {
> +            .class = VIRTIO_NET_CTRL_MAC,
> +            .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
> +        };
> +        char *cursor = s->cvq_cmd_out_buffer;
> +        ssize_t dev_written;
> +
> +        memcpy(cursor, &ctrl, sizeof(ctrl));
> +        cursor += sizeof(ctrl);
> +        memcpy(cursor, n->mac, sizeof(n->mac));
> +
> +        dev_written = vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + sizeof(n->mac),
> +                                             sizeof(virtio_net_ctrl_ack));
> +        if (unlikely(dev_written < 0)) {
> +            return dev_written;
> +        }
> +
> +        return *((virtio_net_ctrl_ack *)s->cvq_cmd_in_buffer) != VIRTIO_NET_OK;
> +    }
> +
> +    return 0;
> +}
> +
>   static NetClientInfo net_vhost_vdpa_cvq_info = {
>       .type = NET_CLIENT_DRIVER_VHOST_VDPA,
>       .size = sizeof(VhostVDPAState),
>       .receive = vhost_vdpa_receive,
>       .start = vhost_vdpa_net_cvq_start,
> +    .load = vhost_vdpa_net_load,
>       .stop = vhost_vdpa_net_cvq_stop,
>       .cleanup = vhost_vdpa_cleanup,
>       .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,

Patch

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index ebf76d1034..8fad31a5fd 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -363,11 +363,51 @@  static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
     return vhost_svq_poll(svq);
 }
 
+static int vhost_vdpa_net_load(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    const struct vhost_vdpa *v = &s->vhost_vdpa;
+    const VirtIONet *n;
+    uint64_t features;
+
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    if (!v->shadow_vqs_enabled) {
+        return 0;
+    }
+
+    n = VIRTIO_NET(v->dev->vdev);
+    features = n->parent_obj.guest_features;
+    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+        const struct virtio_net_ctrl_hdr ctrl = {
+            .class = VIRTIO_NET_CTRL_MAC,
+            .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
+        };
+        char *cursor = s->cvq_cmd_out_buffer;
+        ssize_t dev_written;
+
+        memcpy(cursor, &ctrl, sizeof(ctrl));
+        cursor += sizeof(ctrl);
+        memcpy(cursor, n->mac, sizeof(n->mac));
+
+        dev_written = vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + sizeof(n->mac),
+                                             sizeof(virtio_net_ctrl_ack));
+        if (unlikely(dev_written < 0)) {
+            return dev_written;
+        }
+
+        return *((virtio_net_ctrl_ack *)s->cvq_cmd_in_buffer) != VIRTIO_NET_OK;
+    }
+
+    return 0;
+}
+
 static NetClientInfo net_vhost_vdpa_cvq_info = {
     .type = NET_CLIENT_DRIVER_VHOST_VDPA,
     .size = sizeof(VhostVDPAState),
     .receive = vhost_vdpa_receive,
     .start = vhost_vdpa_net_cvq_start,
+    .load = vhost_vdpa_net_load,
     .stop = vhost_vdpa_net_cvq_stop,
     .cleanup = vhost_vdpa_cleanup,
     .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,