[v2,9/9] igb: respect VMVIR and VMOLR for VLAN

Message ID 20230130132304.2347-10-sriram.yagnaraman@est.tech
State New
Series igb: merge changes from <20221229190817.25500-1-sriram.yagnaraman@est.tech>

Commit Message

Sriram Yagnaraman Jan. 30, 2023, 1:23 p.m. UTC
Add support for stripping and inserting VLAN tags for VFs, respecting the
per-pool VMVIR and VMOLR registers.
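
The rules being emulated are: the pool's VMVIR register can force a default
VLAN tag to always be inserted on transmit (VLANA_DEFAULT, with the VLAN ID in
the low bits) or forbid insertion entirely (VLANA_NEVER), overriding the
descriptor's VLE bit; on receive, VMOLR.STRVLAN (or RPLOLR.STRVLAN for
multicast) decides whether the tag is stripped. Below is a minimal,
self-contained sketch of the transmit-side decision for reviewers who want to
poke at it outside QEMU; the bit values are assumptions based on the usual igb
definitions rather than part of this patch, and the MRQC/VMDq and global
VLAN-enable checks are omitted.

    #include <stdbool.h>
    #include <stdint.h>

    #define VMVIR_VLANA_DEFAULT 0x40000000u  /* assumed value: always insert the default VLAN */
    #define VMVIR_VLANA_NEVER   0x80000000u  /* assumed value: never insert a VLAN tag */

    /*
     * Decide whether a VLAN tag should be inserted for a pool, given its VMVIR
     * value and the VLE bit from the transmit descriptor.  When VLANA_DEFAULT
     * forces insertion, the default VLAN ID is returned through *vlan.
     */
    bool tx_insert_vlan(uint32_t vmvir, bool desc_vle, uint16_t *vlan)
    {
        if (vmvir & VMVIR_VLANA_DEFAULT) {
            *vlan = vmvir & 0xffff;  /* default VLAN taken from VMVIR[15:0] */
            return true;
        }
        if (vmvir & VMVIR_VLANA_NEVER) {
            return false;
        }
        return desc_vle;             /* otherwise honour the descriptor's VLE bit */
    }

    int main(void)
    {
        uint16_t vlan = 0;
        uint32_t vmvir = VMVIR_VLANA_DEFAULT | 100;  /* pool configured with default VLAN 100 */

        return (tx_insert_vlan(vmvir, false, &vlan) && vlan == 100) ? 0 : 1;
    }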

Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
---
 hw/net/igb_core.c | 84 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 54 insertions(+), 30 deletions(-)

Patch

diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index 5ca666229e..a511c64773 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -384,6 +384,25 @@  igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx,
     info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
 }
 
+static inline bool
+igb_tx_insert_vlan(IGBCore *core, uint16_t qn,
+                   struct igb_tx *tx, bool desc_vle)
+{
+    if (core->mac[MRQC] & 1) {
+        uint16_t pool = qn % IGB_MAX_VM_POOLS;
+
+        if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) {
+            /* always insert default VLAN */
+            desc_vle = true;
+            tx->vlan = core->mac[VMVIR0 + pool] & 0xffff;
+        } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) {
+            return false;
+        }
+    }
+
+    return desc_vle && e1000x_vlan_enabled(core->mac);
+}
+
 static bool
 igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
 {
@@ -579,7 +598,8 @@  igb_process_tx_desc(IGBCore *core,
 
     if (cmd_type_len & E1000_TXD_CMD_EOP) {
         if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
-            if (cmd_type_len & E1000_TXD_CMD_VLE) {
+            if (igb_tx_insert_vlan(core, queue_index, tx,
+                !!(cmd_type_len & E1000_TXD_CMD_VLE))) {
                 net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, tx->vlan,
                     core->mac[VET] & 0xffff);
             }
@@ -1541,6 +1561,19 @@  igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt,
     igb_update_rx_stats(core, rxi, size, total_size);
 }
 
+static bool
+igb_rx_strip_vlan(IGBCore *core, const E1000E_RingInfo *rxi)
+{
+    if (core->mac[MRQC] & 1) {
+        uint16_t pool = rxi->idx % IGB_MAX_VM_POOLS;
+        return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ?
+                core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN :
+                core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN;
+    }
+
+    return e1000x_vlan_enabled(core->mac);
+}
+
 static inline void
 igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt)
 {
@@ -1622,10 +1655,7 @@  igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
 
     ehdr = PKT_GET_ETH_HDR(filter_buf);
     net_rx_pkt_set_packet_type(core->rx_pkt, get_eth_packet_type(ehdr));
-
-    net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
-                               e1000x_vlan_enabled(core->mac),
-                               core->mac[VET] & 0xffff);
+    net_rx_pkt_set_protocols(core->rx_pkt, filter_buf, size);
 
     queues = igb_receive_assign(core, ehdr, size, &rss_info, external_tx);
     if (!queues) {
@@ -1633,44 +1663,38 @@  igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
         return orig_size;
     }
 
-    total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
-        e1000x_fcs_len(core->mac);
+    retval = orig_size;
+    total_size = size + e1000x_fcs_len(core->mac);
 
     for (i = 0; i < IGB_NUM_QUEUES; i++) {
-        if (!(queues & BIT(i))) {
+        if (!(queues & BIT(i)) ||
+            !(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
             continue;
         }
 
         igb_rx_ring_init(core, &rxr, i);
+        net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
+                                   igb_rx_strip_vlan(core, rxr.i),
+                                   core->mac[VET] & 0xffff);
+        igb_rx_fix_l4_csum(core, core->rx_pkt);
+
         if (!igb_has_rxbufs(core, rxr.i, total_size)) {
             icr_bits |= E1000_ICS_RXO;
+            continue;
         }
-    }
 
-    if (!icr_bits) {
-        retval = orig_size;
-        igb_rx_fix_l4_csum(core, core->rx_pkt);
-
-        for (i = 0; i < IGB_NUM_QUEUES; i++) {
-            if (!(queues & BIT(i)) ||
-                !(core->mac[E1000_RXDCTL(i) >> 2] & E1000_RXDCTL_QUEUE_ENABLE)) {
-                continue;
-            }
-
-            igb_rx_ring_init(core, &rxr, i);
+        trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
+        igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
 
-            trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
-            igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
-
-            /* Check if receive descriptor minimum threshold hit */
-            if (igb_rx_descr_threshold_hit(core, rxr.i)) {
-                icr_bits |= E1000_ICS_RXDMT0;
-            }
+        /* Check if receive descriptor minimum threshold hit */
+        if (igb_rx_descr_threshold_hit(core, rxr.i)) {
+            icr_bits |= E1000_ICS_RXDMT0;
+        }
 
-            core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
+        core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
 
-            icr_bits |= E1000_ICR_RXDW;
-        }
+        /* same as RXDW (rx descriptor written back) */
+        icr_bits |= E1000_ICR_RXDW;
     }
 
     if (icr_bits & E1000_ICR_RXDW) {