
[v2,for-2.4,12/12] axienet: Flush queued packets when rx is done

Message ID 1436955553-22791-13-git-send-email-famz@redhat.com
State New

Commit Message

Fam Zheng July 15, 2015, 10:19 a.m. UTC
eth_can_rx checks s->rxsize and returns false if it is non-zero. Because
of the .can_receive semantics change, this will leave the incoming queue
disabled by the peer until it is explicitly flushed, so we should flush it
when s->rxsize drops back to zero.

Squash the eth_can_rx check into eth_rx and drop the .can_receive()
callback; also flush the queue when the rx buffer becomes available again
after a packet gets queued.

The other conditions, "!axienet_rx_resetting(s) &&
axienet_rx_enabled(s)", are OK because enet_write already calls
qemu_flush_queued_packets when the register bits are changed.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 hw/net/xilinx_axienet.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
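
In short, the pattern the patch applies looks like this (a condensed
sketch for orientation only; device-specific details are elided and the
real change is the diff at the bottom of this page):

    /* If the device cannot accept a packet right now, .receive returns 0
     * and the net layer queues the packet; the queue then stays blocked
     * until the device calls qemu_flush_queued_packets(). */
    static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
    {
        XilinxAXIEnet *s = qemu_get_nic_opaque(nc);

        if (!eth_can_rx(s)) {
            s->need_flush = true;   /* remember to unblock the queue later */
            return 0;               /* the net layer keeps the packet queued */
        }
        /* ... copy the frame into s->rxmem and hand it to the DMA stream ... */
    }

    static void axienet_eth_rx_notify(void *opaque)
    {
        XilinxAXIEnet *s = opaque;
        /* ... drain s->rxmem into the DMA stream ... */
        if (!s->rxsize && s->need_flush) {
            s->need_flush = false;
            /* rx buffer is free again: re-deliver any queued packets */
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
    }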

Comments

Jason Wang July 16, 2015, 2:58 a.m. UTC | #1
On 07/15/2015 06:19 PM, Fam Zheng wrote:
> eth_can_rx checks s->rxsize and returns false if it is non-zero. Because
> of the .can_receive semantics change, this will leave the incoming queue
> disabled by the peer until it is explicitly flushed, so we should flush it
> when s->rxsize drops back to zero.
>
> Squash the eth_can_rx check into eth_rx and drop the .can_receive()
> callback; also flush the queue when the rx buffer becomes available again
> after a packet gets queued.
>
> The other conditions, "!axienet_rx_resetting(s) &&
> axienet_rx_enabled(s)", are OK because enet_write already calls
> qemu_flush_queued_packets when the register bits are changed.
>
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  hw/net/xilinx_axienet.c | 17 +++++++++++++----
>  1 file changed, 13 insertions(+), 4 deletions(-)
>
> diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
> index 9205770..d63c423 100644
> --- a/hw/net/xilinx_axienet.c
> +++ b/hw/net/xilinx_axienet.c
> @@ -401,6 +401,9 @@ struct XilinxAXIEnet {
>  
>      uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
>      uint32_t rxappsize;
> +
> +    /* Whether axienet_eth_rx_notify should flush incoming queue. */
> +    bool need_flush;
>  };
>  
>  static void axienet_rx_reset(XilinxAXIEnet *s)
> @@ -658,10 +661,8 @@ static const MemoryRegionOps enet_ops = {
>      .endianness = DEVICE_LITTLE_ENDIAN,
>  };
>  
> -static int eth_can_rx(NetClientState *nc)
> +static int eth_can_rx(XilinxAXIEnet *s)
>  {
> -    XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
> -
>      /* RX enabled?  */
>      return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
>  }
> @@ -701,6 +702,10 @@ static void axienet_eth_rx_notify(void *opaque)
>          s->rxpos += ret;
>          if (!s->rxsize) {
>              s->regs[R_IS] |= IS_RX_COMPLETE;
> +            if (s->need_flush) {
> +                s->need_flush = false;
> +                qemu_flush_queued_packets(qemu_get_queue(s->nic));
> +            }
>          }
>      }
>      enet_update_irq(s);
> @@ -721,6 +726,11 @@ static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
>  
>      DENET(qemu_log("%s: %zd bytes\n", __func__, size));
>  
> +    if (!eth_can_rx(s)) {
> +        s->need_flush = true;
> +        return 0;
> +    }
> +

axienet_eth_rx_notify() was only called by eth_rx(). So when
s->need_flush is true, we won't ever reach axienet_eth_rx_notify()?

>      unicast = ~buf[0] & 0x1;
>      broadcast = memcmp(buf, sa_bcast, 6) == 0;
>      multicast = !unicast && !broadcast;
> @@ -925,7 +935,6 @@ xilinx_axienet_data_stream_push(StreamSlave *obj, uint8_t *buf, size_t size)
>  static NetClientInfo net_xilinx_enet_info = {
>      .type = NET_CLIENT_OPTIONS_KIND_NIC,
>      .size = sizeof(NICState),
> -    .can_receive = eth_can_rx,
>      .receive = eth_rx,
>  };
>
Fam Zheng July 16, 2015, 3:32 a.m. UTC | #2
On Thu, 07/16 10:58, Jason Wang wrote:
> 
> 
> On 07/15/2015 06:19 PM, Fam Zheng wrote:
> > eth_can_rx checks s->rxsize and returns false if it is non-zero. Because
> > of the .can_receive semantics change, this will leave the incoming queue
> > disabled by the peer until it is explicitly flushed, so we should flush it
> > when s->rxsize drops back to zero.
> >
> > Squash the eth_can_rx check into eth_rx and drop the .can_receive()
> > callback; also flush the queue when the rx buffer becomes available again
> > after a packet gets queued.
> >
> > The other conditions, "!axienet_rx_resetting(s) &&
> > axienet_rx_enabled(s)", are OK because enet_write already calls
> > qemu_flush_queued_packets when the register bits are changed.
> >
> > Signed-off-by: Fam Zheng <famz@redhat.com>
> > ---
> >  hw/net/xilinx_axienet.c | 17 +++++++++++++----
> >  1 file changed, 13 insertions(+), 4 deletions(-)
> >
> > diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
> > index 9205770..d63c423 100644
> > --- a/hw/net/xilinx_axienet.c
> > +++ b/hw/net/xilinx_axienet.c
> > @@ -401,6 +401,9 @@ struct XilinxAXIEnet {
> >  
> >      uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
> >      uint32_t rxappsize;
> > +
> > +    /* Whether axienet_eth_rx_notify should flush incoming queue. */
> > +    bool need_flush;
> >  };
> >  
> >  static void axienet_rx_reset(XilinxAXIEnet *s)
> > @@ -658,10 +661,8 @@ static const MemoryRegionOps enet_ops = {
> >      .endianness = DEVICE_LITTLE_ENDIAN,
> >  };
> >  
> > -static int eth_can_rx(NetClientState *nc)
> > +static int eth_can_rx(XilinxAXIEnet *s)
> >  {
> > -    XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
> > -
> >      /* RX enabled?  */
> >      return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
> >  }
> > @@ -701,6 +702,10 @@ static void axienet_eth_rx_notify(void *opaque)
> >          s->rxpos += ret;
> >          if (!s->rxsize) {
> >              s->regs[R_IS] |= IS_RX_COMPLETE;
> > +            if (s->need_flush) {
> > +                s->need_flush = false;
> > +                qemu_flush_queued_packets(qemu_get_queue(s->nic));
> > +            }
> >          }
> >      }
> >      enet_update_irq(s);
> > @@ -721,6 +726,11 @@ static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
> >  
> >      DENET(qemu_log("%s: %zd bytes\n", __func__, size));
> >  
> > +    if (!eth_can_rx(s)) {
> > +        s->need_flush = true;
> > +        return 0;
> > +    }
> > +
> 
> axienet_eth_rx_notify() was only called by eth_rx(). So when
> s->need_flush is true, we won't ever reach axienet_eth_rx_notify()?

We will.

If we are here it means a previous call to axienet_eth_rx_notify hasn't drained
the buffer:

    static void axienet_eth_rx_notify(void *opaque)
    {
        ...

        while (s->rxsize && stream_can_push(s->tx_data_dev,
                                            axienet_eth_rx_notify, s)) {
            size_t ret = stream_push(s->tx_data_dev, (void *)s->rxmem + s->rxpos,
                                     s->rxsize);
            s->rxsize -= ret;
            s->rxpos += ret;
            if (!s->rxsize) {
                s->regs[R_IS] |= IS_RX_COMPLETE;
            }
        }
        ...

    }

axienet_eth_rx_notify is passed to stream_can_push so it will be reached again
once s->tx_data_dev can receive more data:

    typedef struct StreamSlaveClass {
        InterfaceClass parent;
        /**
         * can push - determine if a stream slave is capable of accepting at least
         * one byte of data. Returns false if cannot accept. If not implemented, the
         * slave is assumed to always be capable of receiving.
         * @notify: Optional callback that the slave will call when the slave is
         * capable of receiving again. Only called if false is returned.
         * @notify_opaque: opaque data to pass to notify call.
         */
        bool (*can_push)(StreamSlave *obj, StreamCanPushNotifyFn notify,
                         void *notify_opaque);
        ...
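
For illustration, a minimal sketch of how a slave could implement can_push
and fire the saved notify callback once it can accept data again (a
hypothetical DemoSlave, not the actual AXI DMA code; the QOM cast is
elided):

    typedef struct DemoSlave {
        StreamCanPushNotifyFn notify;   /* saved while we report "busy" */
        void *notify_opaque;
        bool busy;
    } DemoSlave;

    static bool demo_can_push(StreamSlave *obj, StreamCanPushNotifyFn notify,
                              void *notify_opaque)
    {
        DemoSlave *s = DEMO_SLAVE(obj);   /* hypothetical QOM cast */

        if (s->busy) {
            /* Remember the callback; it must be invoked when space frees up. */
            s->notify = notify;
            s->notify_opaque = notify_opaque;
            return false;
        }
        return true;
    }

    /* Called by the slave itself once it has made room again. */
    static void demo_space_available(DemoSlave *s)
    {
        s->busy = false;
        if (s->notify) {
            s->notify(s->notify_opaque);   /* re-enters axienet_eth_rx_notify */
            s->notify = NULL;
        }
    }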

Am I missing anything?

Fam
Jason Wang July 16, 2015, 5:38 a.m. UTC | #3
On 07/16/2015 11:32 AM, Fam Zheng wrote:
> On Thu, 07/16 10:58, Jason Wang wrote:
>>
>> On 07/15/2015 06:19 PM, Fam Zheng wrote:
>>> eth_can_rx checks s->rxsize and returns false if it is non-zero. Because
>>> of the .can_receive semantics change, this will leave the incoming queue
>>> disabled by the peer until it is explicitly flushed, so we should flush it
>>> when s->rxsize drops back to zero.
>>>
>>> Squash the eth_can_rx check into eth_rx and drop the .can_receive()
>>> callback; also flush the queue when the rx buffer becomes available again
>>> after a packet gets queued.
>>>
>>> The other conditions, "!axienet_rx_resetting(s) &&
>>> axienet_rx_enabled(s)", are OK because enet_write already calls
>>> qemu_flush_queued_packets when the register bits are changed.
>>>
>>> Signed-off-by: Fam Zheng <famz@redhat.com>
>>> ---
>>>  hw/net/xilinx_axienet.c | 17 +++++++++++++----
>>>  1 file changed, 13 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
>>> index 9205770..d63c423 100644
>>> --- a/hw/net/xilinx_axienet.c
>>> +++ b/hw/net/xilinx_axienet.c
>>> @@ -401,6 +401,9 @@ struct XilinxAXIEnet {
>>>  
>>>      uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
>>>      uint32_t rxappsize;
>>> +
>>> +    /* Whether axienet_eth_rx_notify should flush incoming queue. */
>>> +    bool need_flush;
>>>  };
>>>  
>>>  static void axienet_rx_reset(XilinxAXIEnet *s)
>>> @@ -658,10 +661,8 @@ static const MemoryRegionOps enet_ops = {
>>>      .endianness = DEVICE_LITTLE_ENDIAN,
>>>  };
>>>  
>>> -static int eth_can_rx(NetClientState *nc)
>>> +static int eth_can_rx(XilinxAXIEnet *s)
>>>  {
>>> -    XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
>>> -
>>>      /* RX enabled?  */
>>>      return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
>>>  }
>>> @@ -701,6 +702,10 @@ static void axienet_eth_rx_notify(void *opaque)
>>>          s->rxpos += ret;
>>>          if (!s->rxsize) {
>>>              s->regs[R_IS] |= IS_RX_COMPLETE;
>>> +            if (s->need_flush) {
>>> +                s->need_flush = false;
>>> +                qemu_flush_queued_packets(qemu_get_queue(s->nic));
>>> +            }
>>>          }
>>>      }
>>>      enet_update_irq(s);
>>> @@ -721,6 +726,11 @@ static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
>>>  
>>>      DENET(qemu_log("%s: %zd bytes\n", __func__, size));
>>>  
>>> +    if (!eth_can_rx(s)) {
>>> +        s->need_flush = true;
>>> +        return 0;
>>> +    }
>>> +
>> axienet_eth_rx_notify() was only called by eth_rx(). So when
>> s->need_flush is true, we won't ever reach axienet_eth_rx_notify()?
> We will.
>
> If we are here it means a previous call to axienet_eth_rx_notify hasn't drained
> the buffer:
>
>     static void axienet_eth_rx_notify(void *opaque)
>     {
>         ...
>
>         while (s->rxsize && stream_can_push(s->tx_data_dev,
>                                             axienet_eth_rx_notify, s)) {
>             size_t ret = stream_push(s->tx_data_dev, (void *)s->rxmem + s->rxpos,
>                                      s->rxsize);
>             s->rxsize -= ret;
>             s->rxpos += ret;
>             if (!s->rxsize) {
>                 s->regs[R_IS] |= IS_RX_COMPLETE;
>             }
>         }
>         ...
>
>     }
>
> axienet_eth_rx_notify is passed to stream_can_push so it will be reached again
> once s->tx_data_dev can receive more data:
>
>     typedef struct StreamSlaveClass {
>         InterfaceClass parent;
>         /**
>          * can push - determine if a stream slave is capable of accepting at least
>          * one byte of data. Returns false if cannot accept. If not implemented, the
>          * slave is assumed to always be capable of receiving.
>          * @notify: Optional callback that the slave will call when the slave is
>          * capable of receiving again. Only called if false is returned.
>          * @notify_opaque: opaque data to pass to notify call.
>          */
>         bool (*can_push)(StreamSlave *obj, StreamCanPushNotifyFn notify,
>                          void *notify_opaque);
>         ...
>
> Am I missing anything?
>
> Fam

Probably not. I missed the possible call in axidma_write(). So the patch
is ok :)

Patch

diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
index 9205770..d63c423 100644
--- a/hw/net/xilinx_axienet.c
+++ b/hw/net/xilinx_axienet.c
@@ -401,6 +401,9 @@  struct XilinxAXIEnet {
 
     uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
     uint32_t rxappsize;
+
+    /* Whether axienet_eth_rx_notify should flush incoming queue. */
+    bool need_flush;
 };
 
 static void axienet_rx_reset(XilinxAXIEnet *s)
@@ -658,10 +661,8 @@  static const MemoryRegionOps enet_ops = {
     .endianness = DEVICE_LITTLE_ENDIAN,
 };
 
-static int eth_can_rx(NetClientState *nc)
+static int eth_can_rx(XilinxAXIEnet *s)
 {
-    XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
-
     /* RX enabled?  */
     return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
 }
@@ -701,6 +702,10 @@  static void axienet_eth_rx_notify(void *opaque)
         s->rxpos += ret;
         if (!s->rxsize) {
             s->regs[R_IS] |= IS_RX_COMPLETE;
+            if (s->need_flush) {
+                s->need_flush = false;
+                qemu_flush_queued_packets(qemu_get_queue(s->nic));
+            }
         }
     }
     enet_update_irq(s);
@@ -721,6 +726,11 @@  static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
 
     DENET(qemu_log("%s: %zd bytes\n", __func__, size));
 
+    if (!eth_can_rx(s)) {
+        s->need_flush = true;
+        return 0;
+    }
+
     unicast = ~buf[0] & 0x1;
     broadcast = memcmp(buf, sa_bcast, 6) == 0;
     multicast = !unicast && !broadcast;
@@ -925,7 +935,6 @@  xilinx_axienet_data_stream_push(StreamSlave *obj, uint8_t *buf, size_t size)
 static NetClientInfo net_xilinx_enet_info = {
     .type = NET_CLIENT_OPTIONS_KIND_NIC,
     .size = sizeof(NICState),
-    .can_receive = eth_can_rx,
     .receive = eth_rx,
 };