[bpf-next,RFCv3,5/6] veth: add AF_XDP RX support.

Message ID 1545856073-8680-6-git-send-email-u9012063@gmail.com
State RFC, archived
Delegated to: BPF Maintainers
Series AF_XDP support for veth.

Commit Message

William Tu Dec. 26, 2018, 8:27 p.m. UTC
If the receiving side of the veth pair has an RX umem enabled,
the patch copies the packet directly from the peer side's send
buffer into the umem receive buffer.  This requires running
AF_XDP on both sides of the veth pair.  For example:
Receiver:
  # ip netns exec at_ns0 xdpsock -r -N -z -i p0
Sender:
  # xdpsock -i p1 -t -N -z
The performance increases from 1.4 Mpps to 2.3 Mpps.
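
For reference, below is a minimal userspace sketch of the copy step the hunk
in this patch performs.  The struct layout, helper name, and base-pointer
arithmetic are simplified stand-ins for the kernel's struct xdp_umem and
xdp_umem_get_data(), not the real definitions; only the chunk masking,
headroom offset, and memcpy() mirror the patch.

#include <stdint.h>
#include <string.h>

#define XDP_PACKET_HEADROOM 256   /* same value the kernel reserves */

/* Simplified stand-in for the struct xdp_umem fields the patch touches. */
struct umem_sketch {
        void *base;           /* start of the umem memory area */
        uint64_t chunk_mask;  /* masks a descriptor address down to its chunk */
        uint32_t headroom;    /* user-configured extra headroom */
};

/*
 * Copy one frame of 'len' bytes from the sender's buffer 'vaddr' into the
 * receiver's umem chunk named by 'handle', skipping the reserved headroom,
 * and return the destination address the XDP frame will point at.
 */
static void *copy_to_rx_umem(struct umem_sketch *umem, uint64_t handle,
                             const void *vaddr, uint32_t len)
{
        uint64_t hr = umem->headroom + XDP_PACKET_HEADROOM;
        char *daddr;

        handle &= umem->chunk_mask;                 /* chunk-align the handle */
        daddr = (char *)umem->base + handle + hr;   /* skip reserved headroom */
        memcpy(daddr, vaddr, len);
        return daddr;
}

In the kernel hunk, xdp_umem_get_data() plays the role of the base + handle
translation, while xsk_umem_peek_addr() and xsk_umem_discard_addr() supply
and consume the RX descriptor that provides 'handle'.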

Signed-off-by: William Tu <u9012063@gmail.com>
---
 drivers/net/veth.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

Patch

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 551444195398..8aac67554880 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -766,11 +766,28 @@ static int veth_xsk_poll(struct napi_struct *napi, int budget)
 		struct sk_buff *skb;
 		struct page *page;
 		void *vaddr;
+		u64 handle;
 		u32 len;
 
 		if (!xsk_umem_consume_tx_virtual(peer_rq->xsk_umem, &vaddr, &len))
 			break;
 
+		if (rq->xsk_umem && xsk_umem_peek_addr(rq->xsk_umem, &handle)) {
+			char *daddr;
+			u64 hr = 0;
+
+			/* The receiving peer also has a umem enabled,
+			 * so copy the frame directly into its RX buffer.
+			 */
+			handle &= rq->xsk_umem->chunk_mask;
+			hr = rq->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+			daddr = xdp_umem_get_data(rq->xsk_umem, handle);
+			daddr += hr;
+			memcpy((void *)daddr, vaddr, len);
+			xsk_umem_discard_addr(rq->xsk_umem);
+			vaddr = daddr;
+		}
+
 		xdpf.data = vaddr + metasize;
 		xdpf.len = len;
 		xdpf.headroom = 0;