
[ovs-dev,v2] offload-dpdk: Don't use 24bit value as 32bit.

Message ID 20250501194509.181532-1-mkp@redhat.com
State Changes Requested
Delegated to: Ilya Maximets

Checks

Context Check Description
ovsrobot/apply-robot success apply and check: success
ovsrobot/github-robot-_Build_and_Test success github build: passed
ovsrobot/cirrus-robot success cirrus build: passed

Commit Message

Mike Pattrick May 1, 2025, 7:45 p.m. UTC
Currently, while handling the 24-bit VNI field in the VXLAN code,
netdev-offload-dpdk actually reads and writes 32 bits. The byte that
is overwritten is reserved and supposed to be set to zero anyway, so
this is mostly harmless.

However, OpenScanHub correctly identified this as a buffer overrun. Now
memcpy() is used to limit the memory range accessed.

Reported-at: https://issues.redhat.com/browse/FDP-1122
Signed-off-by: Mike Pattrick <mkp@redhat.com>
---
v2:
 - Changed custom functions to memcpy
 - Switched to use hdr struct member.
---
 lib/netdev-offload-dpdk.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)
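
To make the overrun concrete, here is a minimal, self-contained sketch of
the two approaches. It is illustrative only: the struct mirrors the tail of
DPDK's struct rte_vxlan_hdr, and the type and function names are invented
for this example.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Mirrors the tail of DPDK's struct rte_vxlan_hdr: a 3-byte VNI followed
 * by a reserved byte that is zero on the wire. */
struct vxlan_tail {
    uint8_t vni[3];   /* 24-bit VXLAN Network Identifier, network order. */
    uint8_t rsvd1;    /* Reserved, must be zero. */
};

/* Old approach: a 32-bit load through a cast.  This reads vni[0..2] plus
 * rsvd1, one byte past the 3-byte field, which is the overrun OpenScanHub
 * flagged (the cast is also an unaligned, strict-aliasing hazard). */
static uint32_t
vni_via_cast(const struct vxlan_tail *t)
{
    return ntohl(*(const uint32_t *) t->vni) >> 8;
}

/* New approach: copy exactly sizeof t->vni == 3 bytes into a zeroed
 * 32-bit word, then shift the reserved-byte position away. */
static uint32_t
vni_via_memcpy(const struct vxlan_tail *t)
{
    uint32_t vni = 0;

    memcpy(&vni, t->vni, sizeof t->vni);
    return ntohl(vni) >> 8;
}

Both functions return the same VNI; only the memory range they touch
differs.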

Comments

Ilya Maximets May 14, 2025, 2:29 p.m. UTC | #1
On 5/1/25 9:45 PM, Mike Pattrick via dev wrote:
> Currently, while handling the 24-bit VNI field in the VXLAN code,
> netdev-offload-dpdk actually reads and writes 32 bits. The byte that
> is overwritten is reserved and supposed to be set to zero anyway, so
> this is mostly harmless.
> 
> However, OpenScanHub correctly identified this as a buffer overrun. Now
> memcpy() is used to limit the memory range accessed.
> 
> Reported-at: https://issues.redhat.com/browse/FDP-1122
> Signed-off-by: Mike Pattrick <mkp@redhat.com>
> ---
> v2:
>  - Changed custom functions to memcpy
>  - Switched to use hdr struct member.
> ---
>  lib/netdev-offload-dpdk.c | 22 ++++++++++------------
>  1 file changed, 10 insertions(+), 12 deletions(-)
> 
> diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
> index 6ca271489..1634a86af 100644
> --- a/lib/netdev-offload-dpdk.c
> +++ b/lib/netdev-offload-dpdk.c
> @@ -607,17 +607,15 @@ dump_flow_pattern(struct ds *s,
>      } else if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
>          const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
>          const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
> -        ovs_be32 spec_vni, mask_vni;
> +        ovs_be32 spec_vni = 0, mask_vni = 0;
>  
>          ds_put_cstr(s, "vxlan ");
>          if (vxlan_spec) {
>              if (!vxlan_mask) {
>                  vxlan_mask = &rte_flow_item_vxlan_mask;
>              }
> -            spec_vni = get_unaligned_be32(ALIGNED_CAST(ovs_be32 *,
> -                                                       vxlan_spec->vni));
> -            mask_vni = get_unaligned_be32(ALIGNED_CAST(ovs_be32 *,
> -                                                       vxlan_mask->vni));
> +            memcpy(&spec_vni, vxlan_spec->hdr.vni, sizeof vxlan_spec->hdr.vni);
> +            memcpy(&mask_vni, vxlan_mask->hdr.vni, sizeof vxlan_mask->hdr.vni);

Hi, Mike.

My original suggestion in v1 was to either use memcpy or switch to the hdr.
And since we should be moving to hdr, we should be able to just copy the whole
4 bytes of vx_vni without the need for memcpy.  Copying 4 bytes should also be
more efficient for the CPU.  The hdr struct is packed, but the value is
actually 32-bit aligned already AFAICT, so we may even get away with a simple
assignment.  If not, then get_16aligned_be32() (16-bit alignment is usually
guaranteed for network headers) would still be more efficient than copying
3 bytes one by one.

Best regards, Ilya Maximets.
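
For reference, a sketch of the alternatives Ilya describes, assuming the
DPDK rte_vxlan_hdr layout in which the 32-bit vx_vni word overlays the
3-byte vni array plus the reserved byte. The mimic struct and function
names below are illustrative, not from OVS or DPDK.

#include <stdint.h>
#include <arpa/inet.h>

/* Mimic of DPDK's rte_vxlan_hdr union: the 32-bit vx_vni word overlays
 * the 3-byte vni array and the reserved byte. */
struct vxlan_hdr_mimic {
    union {
        struct {
            uint32_t vx_flags;
            uint32_t vx_vni;   /* VNI (24 bits) + reserved (8 bits). */
        };
        struct {
            uint8_t flags;
            uint8_t rsvd0[3];
            uint8_t vni[3];
            uint8_t rsvd1;
        };
    };
};

/* If the header is 32-bit aligned, one plain assignment reads the whole
 * word; the reserved byte is shifted away afterwards. */
static uint32_t
vni_via_assignment(const struct vxlan_hdr_mimic *h)
{
    return ntohl(h->vx_vni) >> 8;
}

/* If only 16-bit alignment can be assumed, two aligned 16-bit loads do
 * the job; this is the idea behind OVS's get_16aligned_be32(). */
static uint32_t
vni_via_16bit_loads(const struct vxlan_hdr_mimic *h)
{
    const uint16_t *p = (const uint16_t *) &h->vx_vni;

    return (((uint32_t) ntohs(p[0]) << 16) | ntohs(p[1])) >> 8;
}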

Patch

diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index 6ca271489..1634a86af 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -607,17 +607,15 @@  dump_flow_pattern(struct ds *s,
     } else if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
-        ovs_be32 spec_vni, mask_vni;
+        ovs_be32 spec_vni = 0, mask_vni = 0;
 
         ds_put_cstr(s, "vxlan ");
         if (vxlan_spec) {
             if (!vxlan_mask) {
                 vxlan_mask = &rte_flow_item_vxlan_mask;
             }
-            spec_vni = get_unaligned_be32(ALIGNED_CAST(ovs_be32 *,
-                                                       vxlan_spec->vni));
-            mask_vni = get_unaligned_be32(ALIGNED_CAST(ovs_be32 *,
-                                                       vxlan_mask->vni));
+            memcpy(&spec_vni, vxlan_spec->hdr.vni, sizeof vxlan_spec->hdr.vni);
+            memcpy(&mask_vni, vxlan_mask->hdr.vni, sizeof vxlan_mask->hdr.vni);
             DUMP_PATTERN_ITEM(vxlan_mask->vni, false, "vni", "%"PRIu32,
                               ntohl(spec_vni) >> 8, ntohl(mask_vni) >> 8, 0);
         }
@@ -693,10 +691,9 @@  dump_vxlan_encap(struct ds *s, const struct rte_flow_item *items)
     ds_put_format(s, "set vxlan ip-version %s ",
                   ipv4 ? "ipv4" : ipv6 ? "ipv6" : "ERR");
     if (vxlan) {
-        ovs_be32 vni;
+        ovs_be32 vni = 0;
 
-        vni = get_unaligned_be32(ALIGNED_CAST(ovs_be32 *,
-                                              vxlan->vni));
+        memcpy(&vni, &vxlan->hdr.vni, sizeof vxlan->hdr.vni);
         ds_put_format(s, "vni %"PRIu32" ", ntohl(vni) >> 8);
     }
     if (udp) {
@@ -1287,6 +1284,7 @@  parse_vxlan_match(struct flow_patterns *patterns,
 {
     struct rte_flow_item_vxlan *vx_spec, *vx_mask;
     struct flow *consumed_masks;
+    ovs_be32 tun_id;
     int ret;
 
     ret = parse_tnl_ip_match(patterns, match, IPPROTO_UDP);
@@ -1300,10 +1298,10 @@  parse_vxlan_match(struct flow_patterns *patterns,
     vx_spec = xzalloc(sizeof *vx_spec);
     vx_mask = xzalloc(sizeof *vx_mask);
 
-    put_unaligned_be32(ALIGNED_CAST(ovs_be32 *, vx_spec->vni),
-                       htonl(ntohll(match->flow.tunnel.tun_id) << 8));
-    put_unaligned_be32(ALIGNED_CAST(ovs_be32 *, vx_mask->vni),
-                       htonl(ntohll(match->wc.masks.tunnel.tun_id) << 8));
+    tun_id = htonl(ntohll(match->flow.tunnel.tun_id) << 8);
+    memcpy(vx_spec->hdr.vni, &tun_id, sizeof vx_spec->hdr.vni);
+    tun_id = htonl(ntohll(match->wc.masks.tunnel.tun_id) << 8);
+    memcpy(vx_mask->hdr.vni, &tun_id, sizeof vx_mask->hdr.vni);
 
     consumed_masks->tunnel.tun_id = 0;
     consumed_masks->tunnel.flags = 0;
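
As a worked example of the byte arithmetic in parse_vxlan_match() above
(the tunnel ID value is invented for illustration):

/* Suppose the flow's 64-bit tunnel ID carries VNI 0xABCDEF in its low
 * 24 bits.  The conversion then goes:
 *
 *   ntohll(match->flow.tunnel.tun_id)  == 0x0000000000ABCDEF  (host order)
 *   ... << 8                           == 0x00000000ABCDEF00
 *   htonl(...)  (truncated to 32 bits) -> bytes AB CD EF 00 in memory
 *
 * memcpy() then copies the first three bytes, AB CD EF, into hdr.vni,
 * and the reserved fourth byte stays zero because vx_spec and vx_mask
 * come from xzalloc(). */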