@@ -292,6 +292,11 @@ struct dpdk_mp {
struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};
+/* Queue 'INTERIM_QUEUE_BURST_THRESHOLD' packets before transmitting.
+ * Defaults to 'NETDEV_MAX_BURST'(32) packets.
+ */
+#define INTERIM_QUEUE_BURST_THRESHOLD NETDEV_MAX_BURST
+
/* There should be one 'struct dpdk_tx_queue' created for
* each cpu core. */
struct dpdk_tx_queue {
@@ -301,6 +306,12 @@ struct dpdk_tx_queue {
* pmd threads (see 'concurrent_txq'). */
int map; /* Mapping of configured vhost-user queues
* to enabled by guest. */
+ int count; /* Number of buffered packets waiting to
+ be sent. */
+ struct rte_mbuf *burst_pkts[INTERIM_QUEUE_BURST_THRESHOLD];
+ /* Intermediate queue where packets can
+ * be buffered to amortize the cost of MMIO
+ * writes. */
};
/* dpdk has no way to remove dpdk ring ethernet devices
@@ -1892,9 +1903,24 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
* few packets (< INTERIM_QUEUE_BURST_THRESHOLD) are buffered in the queue.
*/
static int
-netdev_dpdk_txq_drain(struct netdev *netdev OVS_UNUSED,
- int qid OVS_UNUSED, bool concurrent_txq OVS_UNUSED)
+netdev_dpdk_txq_drain(struct netdev *netdev,
+ int qid, bool concurrent_txq)
{
+    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+    struct dpdk_tx_queue *txq;
+    /* Remap 'qid' (and take the lock) before touching the queue, so the
+     * count check reads the same txq netdev_dpdk_send__ buffered into. */
+    if (OVS_UNLIKELY(concurrent_txq)) {
+        qid = qid % dev->up.n_txq;
+        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+    }
+    txq = &dev->tx_q[qid];
+    if (OVS_LIKELY(txq->count)) {
+        netdev_dpdk_eth_tx_burst(dev, qid, txq->burst_pkts, txq->count);
+    }
+    if (OVS_UNLIKELY(concurrent_txq)) {
+        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
+    }
return 0;
}