
[9/9] igb: respect VMVIR and VMOLR for VLAN

Message ID: 20230128134633.22730-10-sriram.yagnaraman@est.tech
State: New
Series: igb: add missing feature set from

Commit Message

Sriram Yagnaraman Jan. 28, 2023, 1:46 p.m. UTC
Add support for stripping/inserting VLAN for VFs.

Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
---
 hw/net/igb_core.c | 100 ++++++++++++++++++++++++++++++----------------
 1 file changed, 65 insertions(+), 35 deletions(-)

Comments

Akihiko Odaki Jan. 29, 2023, 8:13 a.m. UTC | #1
On 2023/01/28 22:46, Sriram Yagnaraman wrote:
> Add support for stripping/inserting VLAN for VFs.
> 
> Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
> ---
>   hw/net/igb_core.c | 100 ++++++++++++++++++++++++++++++----------------
>   1 file changed, 65 insertions(+), 35 deletions(-)
> 
> diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
> index 8e33e15505..96a5c5eca3 100644
> --- a/hw/net/igb_core.c
> +++ b/hw/net/igb_core.c
> @@ -384,6 +384,26 @@ igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx,
>       info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
>   }
>   
> +static inline bool
> +igb_tx_insert_vlan(IGBCore *core, uint16_t qn,
> +                   struct igb_tx *tx, bool desc_vle)
> +{
> +    if (core->mac[MRQC] & 1) {
> +        uint16_t pool = (qn > IGB_MAX_VF_FUNCTIONS) ?
> +                        (qn - IGB_MAX_VF_FUNCTIONS) : qn;
> +
> +        if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) {
> +            /* always insert default VLAN */
> +            desc_vle = true;
> +            tx->vlan = core->mac[VMVIR0 + pool] & 0xfff;

This should be masked with 0xffff; the "Port VLAN ID" field is defined as 
16-bit.
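
For illustration only, a sketch of the corrected assignment, reusing the 
names from this patch:

    if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) {
        /* always insert default VLAN; take the full 16-bit PVID field */
        desc_vle = true;
        tx->vlan = core->mac[VMVIR0 + pool] & 0xffff;
    }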

> +        } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) {
> +            return false;
> +        }
> +    }
> +
> +    return desc_vle && e1000x_vlan_enabled(core->mac);
> +}
> +
>   static bool
>   igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
>   {
> @@ -580,7 +600,8 @@ igb_process_tx_desc(IGBCore *core,
>   
>       if (cmd_type_len & E1000_TXD_CMD_EOP) {
>           if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
> -            if (cmd_type_len & E1000_TXD_CMD_VLE) {
> +            if (igb_tx_insert_vlan(core, queue_index, tx,
> +                (cmd_type_len & E1000_TXD_CMD_VLE))) {

The fourth parameter of igb_tx_insert_vlan() is bool, which is only 
defined to guarantee one bit of storage, but this passes a value greater 
than that.
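
One way to address this would be to normalize the flag at the call site, 
e.g. (a sketch based on the patch's own code):

    if (igb_tx_insert_vlan(core, queue_index, tx,
                           !!(cmd_type_len & E1000_TXD_CMD_VLE))) {
        net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, tx->vlan,
            core->mac[VET] & 0xffff);
    }

The !! collapses the masked multi-bit value to 0 or 1 before the 
conversion to bool.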

>                   net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, tx->vlan,
>                       core->mac[VET] & 0xffff);
>               }
> @@ -1514,6 +1535,22 @@ igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt,
>       igb_update_rx_stats(core, rxi, size, total_size);
>   }
>   
> +static inline bool
> +igb_rx_strip_vlan(IGBCore *core, const E1000E_RingInfo *rxi,
> +    eth_pkt_types_e pkt_type)
> +{
> +    if (core->mac[MRQC] & 1) {
> +        uint16_t qn = rxi->idx;
> +        uint16_t pool = (qn > IGB_MAX_VF_FUNCTIONS) ?
> +                        (qn - IGB_MAX_VF_FUNCTIONS) : qn;
> +        return (pkt_type == ETH_PKT_MCAST) ?
> +                core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN :
> +                core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN;
> +    }
> +
> +    return e1000x_vlan_enabled(core->mac);
> +}
> +
>   static inline bool
>   igb_is_oversized(IGBCore *core, const E1000E_RingInfo *rxi, size_t size)
>   {
> @@ -1574,6 +1611,7 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
>       size_t total_size;
>       ssize_t retval = 0;
>       int i;
> +    bool strip_vlan = false;

strip_vlan does not need a default value. Having a default value will 
suppress the compiler warnings you would otherwise get on a path where you 
actually need to compute a valid value.
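
That is, the declaration would simply become:

    ssize_t retval = 0;
    int i;
    bool strip_vlan; /* deliberately uninitialized so the compiler can
                      * warn if any path uses it before computing it */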

>   
>       trace_e1000e_rx_receive_iov(iovcnt);
>   
> @@ -1615,10 +1653,7 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
>   
>       ehdr = PKT_GET_ETH_HDR(filter_buf);
>       net_rx_pkt_set_packet_type(core->rx_pkt, get_eth_packet_type(ehdr));
> -
> -    net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
> -                               e1000x_vlan_enabled(core->mac),
> -                               core->mac[VET] & 0xffff);
> +    net_rx_pkt_set_protocols(core->rx_pkt, filter_buf, size);
>   
>       queues = igb_receive_assign(core, ehdr, &rss_info, external_tx);
>       if (!queues) {
> @@ -1626,8 +1661,8 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
>           return orig_size;
>       }
>   
> -    total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
> -        e1000x_fcs_len(core->mac);
> +    retval = orig_size;
> +    total_size = size + e1000x_fcs_len(core->mac);
>   
>       for (i = 0; i < IGB_NUM_QUEUES; i++) {
>           if (!(queues & BIT(i))) {
> @@ -1635,43 +1670,38 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
>           }
>   
>           igb_rx_ring_init(core, &rxr, i);
> +        strip_vlan = igb_rx_strip_vlan(core, rxr.i,
> +            get_eth_packet_type(ehdr));
> +        net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
> +            strip_vlan, core->mac[VET] & 0xffff);
> +        igb_rx_fix_l4_csum(core, core->rx_pkt);
> +
>           if (!igb_has_rxbufs(core, rxr.i, total_size)) {
>               icr_bits |= E1000_ICS_RXO;
> +            continue;
>           }
> -    }
> -
> -    if (!icr_bits) {
> -        retval = orig_size;
> -        igb_rx_fix_l4_csum(core, core->rx_pkt);
> -
> -        for (i = 0; i < IGB_NUM_QUEUES; i++) {
> -            if (!(queues & BIT(i))) {
> -                continue;
> -            }
>   
> -            igb_rx_ring_init(core, &rxr, i);
> -            if (igb_is_oversized(core, rxr.i, size)) {
> -                oversized |= BIT(i);
> -                continue;
> -            }
> +        if (igb_is_oversized(core, rxr.i, total_size)) {
> +            oversized |= BIT(i);
> +            continue;
> +        }
>   
> -            if (!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
> -                continue;
> -            }
> +        if (!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
> +            continue;
> +        }
>   
> -            trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
> -            igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
> +        trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
> +        igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
>   
> -            /* Check if receive descriptor minimum threshold hit */
> -            if (igb_rx_descr_threshold_hit(core, rxr.i)) {
> -                icr_bits |= E1000_ICS_RXDMT0;
> -            }
> +        /* Check if receive descriptor minimum threshold hit */
> +        if (igb_rx_descr_threshold_hit(core, rxr.i)) {
> +            icr_bits |= E1000_ICS_RXDMT0;
> +        }
>   
> -            core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
> +        core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
>   
> -            /* same as RXDW (rx descriptor written back)*/
> -            icr_bits |= E1000_ICR_RXDW;
> -        }
> +        /* same as RXDW (rx descriptor written back)*/
> +        icr_bits |= E1000_ICR_RXDW;
>       }
>   
>       /* 8.19.37 increment ROC only if packet is oversized for all queues */