axienet: Flush queued packets when rx is done

Message ID 1435900462-16479-1-git-send-email-famz@redhat.com
State New

Commit Message

Fam Zheng July 3, 2015, 5:14 a.m. UTC
eth_can_rx checks s->rxsize and returns false if it is non-zero. Because
of the .can_receive semantics change, this leaves the incoming queue
disabled by the peer until it is explicitly flushed, so we should flush
it when s->rxsize drops to zero.
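
For reference, the check being discussed looks roughly like this (a
sketch, not the verbatim body in hw/net/xilinx_axienet.c):

    static int eth_can_rx(NetClientState *nc)
    {
        XilinxAXIEnet *s = qemu_get_nic_opaque(nc);

        /* Refuse packets while a previous frame still sits in rxmem. */
        return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
    }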

Do this by adding a BH that calls qemu_flush_queued_packets after
s->rxsize is decremented to zero in axienet_eth_rx_notify. A BH is
necessary to avoid an overly deep recursive call stack.
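
Calling qemu_flush_queued_packets directly from axienet_eth_rx_notify
could recurse, roughly along this chain (sketched, not verbatim):

    axienet_eth_rx_notify()
      -> qemu_flush_queued_packets()
        -> eth_rx()                    /* the .receive callback */
          -> axienet_eth_rx_notify()   /* and around again */

Deferring the flush to a BH lets the current stack unwind first.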

The other conditions, "!axienet_rx_resetting(s) &&
axienet_rx_enabled(s)", are fine because enet_write already calls
qemu_flush_queued_packets when the relevant register bits change.
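
That is, when a guest write can re-enable rx, the write handler already
pings the queue, along these lines (a sketch of the existing code, not
verbatim):

    /* in enet_write(), after the rx control bits are updated */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));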

Signed-off-by: Fam Zheng <famz@redhat.com>

---

Only compile-tested, because I don't have a Xilinx image to boot a
guest.
---
 hw/net/xilinx_axienet.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

Patch

diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
index 9205770..bbc0ea8 100644
--- a/hw/net/xilinx_axienet.c
+++ b/hw/net/xilinx_axienet.c
@@ -28,6 +28,7 @@ 
 #include "net/checksum.h"
 
 #include "hw/stream.h"
+#include "qemu/main-loop.h"
 
 #define DPHY(x)
 
@@ -401,6 +402,8 @@  struct XilinxAXIEnet {
 
     uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
     uint32_t rxappsize;
+
+    QEMUBH *flush_bh;
 };
 
 static void axienet_rx_reset(XilinxAXIEnet *s)
@@ -681,8 +684,15 @@  static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
     return match;
 }
 
+static void xilinx_flush_cb(void *opaque)
+{
+    XilinxAXIEnet *s = opaque;
+    qemu_flush_queued_packets(qemu_get_queue(s->nic));
+}
+
 static void axienet_eth_rx_notify(void *opaque)
 {
+    bool flush = false;
     XilinxAXIEnet *s = XILINX_AXI_ENET(opaque);
 
     while (s->rxappsize && stream_can_push(s->tx_control_dev,
@@ -701,9 +711,13 @@  static void axienet_eth_rx_notify(void *opaque)
         s->rxpos += ret;
         if (!s->rxsize) {
             s->regs[R_IS] |= IS_RX_COMPLETE;
+            flush = true;
         }
     }
     enet_update_irq(s);
+    if (flush) {
+        qemu_bh_schedule(s->flush_bh);
+    }
 }
 
 static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
@@ -967,6 +981,7 @@  static void xilinx_enet_realize(DeviceState *dev, Error **errp)
     s->TEMAC.parent = s;
 
     s->rxmem = g_malloc(s->c_rxmem);
+    s->flush_bh = qemu_bh_new(xilinx_flush_cb, s);
     return;
 
 xilinx_enet_realize_fail:
@@ -975,6 +990,12 @@  xilinx_enet_realize_fail:
     }
 }
 
+static void xilinx_enet_unrealize(DeviceState *dev, Error **errp)
+{
+    XilinxAXIEnet *s = XILINX_AXI_ENET(dev);
+    qemu_bh_delete(s->flush_bh);
+}
+
 static void xilinx_enet_init(Object *obj)
 {
     XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
@@ -1020,6 +1041,7 @@  static void xilinx_enet_class_init(ObjectClass *klass, void *data)
     DeviceClass *dc = DEVICE_CLASS(klass);
 
     dc->realize = xilinx_enet_realize;
+    dc->unrealize = xilinx_enet_unrealize;
     dc->props = xilinx_enet_properties;
     dc->reset = xilinx_axienet_reset;
 }