
[v2,3/9] igb: implement VFRE and VFTE registers

Message ID 20230130132304.2347-4-sriram.yagnaraman@est.tech
State New
Series igb: merge changes from <20221229190817.25500-1-sriram.yagnaraman@est.tech>

Commit Message

Sriram Yagnaraman Jan. 30, 2023, 1:22 p.m. UTC
Also add checks for RXDCTL/TXDCTL queue enable bits

Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
---
 hw/net/igb_core.c | 41 ++++++++++++++++++++++++++++++-----------
 hw/net/igb_regs.h |  4 +++-
 2 files changed, 33 insertions(+), 12 deletions(-)

Comments

Akihiko Odaki Jan. 30, 2023, 2:19 p.m. UTC | #1
On 2023/01/30 22:22, Sriram Yagnaraman wrote:
> Also add checks for RXDCTL/TXDCTL queue enable bits
> 
> Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
> ---
>   hw/net/igb_core.c | 41 ++++++++++++++++++++++++++++++-----------
>   hw/net/igb_regs.h |  4 +++-
>   2 files changed, 33 insertions(+), 12 deletions(-)
> 
> diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
> index 9bd53cc25f..b8c01cb773 100644
> --- a/hw/net/igb_core.c
> +++ b/hw/net/igb_core.c
> @@ -778,6 +778,18 @@ igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
>       return igb_tx_wb_eic(core, txi->idx);
>   }
>   
> +static inline bool
> +igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi)
> +{
> +    bool vmdq = core->mac[MRQC] & 1;
> +    uint16_t qn = txi->idx;
> +    uint16_t vfn = qn % IGB_MAX_VM_POOLS;

Rename vfn to pool as you did in other places.
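
For illustration, a sketch of the helper with that rename applied (same logic as 
the patch, only the identifier changes):

    static inline bool
    igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi)
    {
        bool vmdq = core->mac[MRQC] & 1;           /* VMDq (pool) mode enabled */
        uint16_t qn = txi->idx;                    /* tx queue number */
        uint16_t pool = qn % IGB_MAX_VM_POOLS;     /* owning pool, previously "vfn" */

        return (core->mac[TCTL] & E1000_TCTL_EN) &&
            (!vmdq || core->mac[VFTE] & BIT(pool)) &&
            (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
    }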

> +
> +    return (core->mac[TCTL] & E1000_TCTL_EN) &&
> +        (!vmdq || core->mac[VFTE] & BIT(vfn)) &&
> +        (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
> +}
> +
>   static void
>   igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
>   {
> @@ -787,8 +799,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
>       const E1000E_RingInfo *txi = txr->i;
>       uint32_t eic = 0;
>   
> -    /* TODO: check if the queue itself is enabled too. */
> -    if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
> +    if (!igb_tx_enabled(core, txi)) {
>           trace_e1000e_tx_disabled();
>           return;
>       }
> @@ -1003,6 +1014,7 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr,
>               queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
>           }
>   
> +        queues &= core->mac[VFRE];
>           igb_rss_parse_packet(core, core->rx_pkt, external_tx != NULL, rss_info);
>           if (rss_info->queue & 1) {
>               queues <<= 8;
> @@ -1486,7 +1498,7 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
>       static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
>   
>       uint16_t queues = 0;
> -    uint32_t n;
> +    uint32_t n = 0;
>       uint8_t min_buf[ETH_ZLEN];
>       struct iovec min_iov;
>       struct eth_header *ehdr;
> @@ -1566,26 +1578,22 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
>           }
>   
>           igb_rx_ring_init(core, &rxr, i);
> -
> -        trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
> -

I think you have seen my earlier email, but just in case: please move 
the fix included in a later patch into this one.

>           if (!igb_has_rxbufs(core, rxr.i, total_size)) {
>               retval = 0;
>           }
>       }
>   
>       if (retval) {
> -        n = E1000_ICR_RXT0;
> -
>           igb_rx_fix_l4_csum(core, core->rx_pkt);
>   
>           for (i = 0; i < IGB_NUM_QUEUES; i++) {
> -            if (!(queues & BIT(i))) {
> +            if (!(queues & BIT(i)) ||
> +                !(core->mac[E1000_RXDCTL(i) >> 2] & E1000_RXDCTL_QUEUE_ENABLE)) {
>                   continue;
>               }
>   
>               igb_rx_ring_init(core, &rxr, i);
> -
> +            trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
>               igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
>   
>               /* Check if receive descriptor minimum threshold hit */
> @@ -1594,6 +1602,9 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
>               }
>   
>               core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
> +
> +            /* same as RXDW (rx descriptor written back)*/
> +            n = E1000_ICR_RXT0;

Place the next patch, "igb: add ICR_RXDW", before this patch and remove 
the comment; it will be redundant after that patch.
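
If the series is reordered that way, the line above would presumably use the 
dedicated cause bit directly; a hypothetical sketch (the macro name below is an 
assumption about what the "igb: add ICR_RXDW" patch introduces):

    /* assumed form once E1000_ICR_RXDW is defined; replaces the RXT0 stand-in */
    n |= E1000_ICR_RXDW;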

>           }
>   
>           trace_e1000e_rx_written_to_guest(n);
> @@ -1981,9 +1992,16 @@ static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
>   
>   static void igb_vf_reset(IGBCore *core, uint16_t vfn)
>   {
> +    uint16_t qn0 = vfn;
> +    uint16_t qn1 = vfn + IGB_MAX_VF_FUNCTIONS;
> +
>       /* disable Rx and Tx for the VF*/
> -    core->mac[VFTE] &= ~BIT(vfn);
> +    core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
> +    core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
> +    core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
> +    core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
>       core->mac[VFRE] &= ~BIT(vfn);
> +    core->mac[VFTE] &= ~BIT(vfn);
>       /* indicate VF reset to PF */
>       core->mac[VFLRE] |= BIT(vfn);
>       /* VFLRE and mailbox use the same interrupt cause */
> @@ -3889,6 +3907,7 @@ igb_phy_reg_init[] = {
>   static const uint32_t igb_mac_reg_init[] = {
>       [LEDCTL]        = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
>       [EEMNGCTL]      = BIT(31),
> +    [TXDCTL0]       = E1000_TXDCTL_QUEUE_ENABLE,
>       [RXDCTL0]       = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
>       [RXDCTL1]       = 1 << 16,
>       [RXDCTL2]       = 1 << 16,
> diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h
> index ebf3e95023..c8ce5b1671 100644
> --- a/hw/net/igb_regs.h
> +++ b/hw/net/igb_regs.h
> @@ -147,6 +147,7 @@ union e1000_adv_rx_desc {
>   #define IGB_MAX_TX_QUEUES          8
>   #define IGB_MAX_VF_MC_ENTRIES      30
>   #define IGB_MAX_VF_FUNCTIONS       8
> +#define IGB_MAX_VM_POOLS           8
>   #define IGB_MAX_VFTA_ENTRIES       128
>   #define IGB_82576_VF_DEV_ID        0x10CA
>   #define IGB_I350_VF_DEV_ID         0x1520
> @@ -160,7 +161,8 @@ union e1000_adv_rx_desc {
>   #define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
>   #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
>   
> -/* Additional Receive Descriptor Control definitions */
> +/* Additional RX/TX Descriptor Control definitions */
> +#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
>   #define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
>   
>   /* Direct Cache Access (DCA) definitions */
Akihiko Odaki Jan. 30, 2023, 2:23 p.m. UTC | #2
On 2023/01/30 23:19, Akihiko Odaki wrote:
> [...]
>> -/* Additional Receive Descriptor Control definitions */
>> +/* Additional RX/TX Descriptor Control definitions */
>> +#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
>>   #define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
>>   /* Direct Cache Access (DCA) definitions */

Also: igb_regs.h is copied from Linux. Copy the definition from:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/net/ethernet/intel/igb/e1000_82575.h?h=v6.1#n140

...and paste it into the corresponding place.
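
For reference, a sketch of how the igb_regs.h hunk might look with the definition 
copied in as its own block, mirroring the Linux header's layout (the exact comment 
wording should be taken verbatim from the linked file; the wording below is an 
assumption):

    /* Additional Receive Descriptor Control definitions */
    #define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */

    /* Additional Transmit Descriptor Control definitions */
    #define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */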

Patch

diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index 9bd53cc25f..b8c01cb773 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -778,6 +778,18 @@  igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
     return igb_tx_wb_eic(core, txi->idx);
 }
 
+static inline bool
+igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi)
+{
+    bool vmdq = core->mac[MRQC] & 1;
+    uint16_t qn = txi->idx;
+    uint16_t vfn = qn % IGB_MAX_VM_POOLS;
+
+    return (core->mac[TCTL] & E1000_TCTL_EN) &&
+        (!vmdq || core->mac[VFTE] & BIT(vfn)) &&
+        (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
+}
+
 static void
 igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
 {
@@ -787,8 +799,7 @@  igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
     const E1000E_RingInfo *txi = txr->i;
     uint32_t eic = 0;
 
-    /* TODO: check if the queue itself is enabled too. */
-    if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
+    if (!igb_tx_enabled(core, txi)) {
         trace_e1000e_tx_disabled();
         return;
     }
@@ -1003,6 +1014,7 @@  static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr,
             queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
         }
 
+        queues &= core->mac[VFRE];
         igb_rss_parse_packet(core, core->rx_pkt, external_tx != NULL, rss_info);
         if (rss_info->queue & 1) {
             queues <<= 8;
@@ -1486,7 +1498,7 @@  igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
     static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
 
     uint16_t queues = 0;
-    uint32_t n;
+    uint32_t n = 0;
     uint8_t min_buf[ETH_ZLEN];
     struct iovec min_iov;
     struct eth_header *ehdr;
@@ -1566,26 +1578,22 @@  igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
         }
 
         igb_rx_ring_init(core, &rxr, i);
-
-        trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
-
         if (!igb_has_rxbufs(core, rxr.i, total_size)) {
             retval = 0;
         }
     }
 
     if (retval) {
-        n = E1000_ICR_RXT0;
-
         igb_rx_fix_l4_csum(core, core->rx_pkt);
 
         for (i = 0; i < IGB_NUM_QUEUES; i++) {
-            if (!(queues & BIT(i))) {
+            if (!(queues & BIT(i)) ||
+                !(core->mac[E1000_RXDCTL(i) >> 2] & E1000_RXDCTL_QUEUE_ENABLE)) {
                 continue;
             }
 
             igb_rx_ring_init(core, &rxr, i);
-
+            trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
             igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
 
             /* Check if receive descriptor minimum threshold hit */
@@ -1594,6 +1602,9 @@  igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
             }
 
             core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
+
+            /* same as RXDW (rx descriptor written back)*/
+            n = E1000_ICR_RXT0;
         }
 
         trace_e1000e_rx_written_to_guest(n);
@@ -1981,9 +1992,16 @@  static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
 
 static void igb_vf_reset(IGBCore *core, uint16_t vfn)
 {
+    uint16_t qn0 = vfn;
+    uint16_t qn1 = vfn + IGB_MAX_VF_FUNCTIONS;
+
     /* disable Rx and Tx for the VF*/
-    core->mac[VFTE] &= ~BIT(vfn);
+    core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
+    core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
+    core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
+    core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
     core->mac[VFRE] &= ~BIT(vfn);
+    core->mac[VFTE] &= ~BIT(vfn);
     /* indicate VF reset to PF */
     core->mac[VFLRE] |= BIT(vfn);
     /* VFLRE and mailbox use the same interrupt cause */
@@ -3889,6 +3907,7 @@  igb_phy_reg_init[] = {
 static const uint32_t igb_mac_reg_init[] = {
     [LEDCTL]        = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
     [EEMNGCTL]      = BIT(31),
+    [TXDCTL0]       = E1000_TXDCTL_QUEUE_ENABLE,
     [RXDCTL0]       = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
     [RXDCTL1]       = 1 << 16,
     [RXDCTL2]       = 1 << 16,
diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h
index ebf3e95023..c8ce5b1671 100644
--- a/hw/net/igb_regs.h
+++ b/hw/net/igb_regs.h
@@ -147,6 +147,7 @@  union e1000_adv_rx_desc {
 #define IGB_MAX_TX_QUEUES          8
 #define IGB_MAX_VF_MC_ENTRIES      30
 #define IGB_MAX_VF_FUNCTIONS       8
+#define IGB_MAX_VM_POOLS           8
 #define IGB_MAX_VFTA_ENTRIES       128
 #define IGB_82576_VF_DEV_ID        0x10CA
 #define IGB_I350_VF_DEV_ID         0x1520
@@ -160,7 +161,8 @@  union e1000_adv_rx_desc {
 #define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
 
-/* Additional Receive Descriptor Control definitions */
+/* Additional RX/TX Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
 #define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
 
 /* Direct Cache Access (DCA) definitions */