
[bpf-next,4/5] samples/bpf: use Rx-only and Tx-only sockets in xdpsock

Message ID 1573148860-30254-5-git-send-email-magnus.karlsson@intel.com
State Accepted
Delegated to: BPF Maintainers
Series Extend libbpf to support shared umems and Rx|Tx-only sockets

Commit Message

Magnus Karlsson Nov. 7, 2019, 5:47 p.m. UTC
Use Rx-only sockets for the rxdrop sample and Tx-only sockets for the
txpush sample in the xdpsock application. This is so that we exercise
and showcase these socket types too.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
---
 samples/bpf/xdpsock_user.c | 41 +++++++++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 12 deletions(-)
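
For context, the Rx-only and Tx-only behaviour falls out of libbpf's xsk
API: xsk_socket__create() simply skips whichever ring pointer is passed as
NULL, which is exactly what the new rxr/txr logic in this patch relies on.
Below is a minimal sketch (not part of the patch) of an Rx-only socket; the
helper name create_rx_only_socket(), the "eth0" interface, queue 0 and the
config values are illustrative assumptions, and the xsk.h include path may
differ depending on how libbpf is installed.

#include <bpf/xsk.h>		/* xsk_socket__create() and ring types */
#include <linux/if_link.h>	/* XDP_FLAGS_UPDATE_IF_NOEXIST */
#include <linux/if_xdp.h>	/* XDP_COPY */

/* Sketch only: create an Rx-only AF_XDP socket by passing NULL for the
 * Tx ring.  A Tx-only socket would instead pass NULL for @rx and a valid
 * struct xsk_ring_prod * for the Tx ring.  "eth0" and queue 0 are
 * placeholders.
 */
static int create_rx_only_socket(struct xsk_umem *umem,
				 struct xsk_ring_cons *rx,
				 struct xsk_socket **xsk)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST,
		.bind_flags = XDP_COPY,
	};

	/* NULL Tx ring pointer makes this socket Rx-only; returns 0 or a
	 * negative errno.
	 */
	return xsk_socket__create(xsk, "eth0", 0, umem, rx, NULL, &cfg);
}

In the patch itself, xsk_configure_socket() makes the same choice based on
the selected benchmark: rxdrop gets an Rx-only socket, txonly a Tx-only
one, and l2fwd both rings, with the fill ring only populated in the Rx
cases.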

Comments

Jonathan Lemon Nov. 8, 2019, 11:02 p.m. UTC | #1
On 7 Nov 2019, at 9:47, Magnus Karlsson wrote:

> Use Rx-only sockets for the rxdrop sample and Tx-only sockets for the
> txpush sample in the xdpsock application. This is so that we exercise
> and showcase these socket types too.
>
> Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>

Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
William Tu Nov. 10, 2019, 6:34 p.m. UTC | #2
On Fri, Nov 8, 2019 at 3:02 PM Jonathan Lemon <jonathan.lemon@gmail.com> wrote:
>
> On 7 Nov 2019, at 9:47, Magnus Karlsson wrote:
>
> > Use Rx-only sockets for the rxdrop sample and Tx-only sockets for the
> > txpush sample in the xdpsock application. This is so that we exercise
> > and showcase these socket types too.
> >
> > Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
>
> Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>

Tested-by: William Tu <u9012063@gmail.com>

Patch

diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index d3dba93..a1f96e5 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -291,8 +291,7 @@  static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
 		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
 		.flags = opt_umem_flags
 	};
-	int ret, i;
-	u32 idx;
+	int ret;
 
 	umem = calloc(1, sizeof(*umem));
 	if (!umem)
@@ -300,10 +299,18 @@  static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
 
 	ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
 			       &cfg);
-
 	if (ret)
 		exit_with_error(-ret);
 
+	umem->buffer = buffer;
+	return umem;
+}
+
+static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
+{
+	int ret, i;
+	u32 idx;
+
 	ret = xsk_ring_prod__reserve(&umem->fq,
 				     XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx);
 	if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
@@ -312,15 +319,15 @@  static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
 		*xsk_ring_prod__fill_addr(&umem->fq, idx++) =
 			i * opt_xsk_frame_size;
 	xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);
-
-	umem->buffer = buffer;
-	return umem;
 }
 
-static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
+static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
+						    bool rx, bool tx)
 {
 	struct xsk_socket_config cfg;
 	struct xsk_socket_info *xsk;
+	struct xsk_ring_cons *rxr;
+	struct xsk_ring_prod *txr;
 	int ret;
 
 	xsk = calloc(1, sizeof(*xsk));
@@ -337,8 +344,10 @@  static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
 	cfg.xdp_flags = opt_xdp_flags;
 	cfg.bind_flags = opt_xdp_bind_flags;
 
-	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue,
-				 umem->umem, &xsk->rx, &xsk->tx, &cfg);
+	rxr = rx ? &xsk->rx : NULL;
+	txr = tx ? &xsk->tx : NULL;
+	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
+				 rxr, txr, &cfg);
 	if (ret)
 		exit_with_error(-ret);
 
@@ -783,6 +792,7 @@  static void enter_xsks_into_map(struct bpf_object *obj)
 int main(int argc, char **argv)
 {
 	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+	bool rx = false, tx = false;
 	struct xsk_umem_info *umem;
 	struct bpf_object *obj;
 	pthread_t pt;
@@ -811,11 +821,18 @@  int main(int argc, char **argv)
 
 	/* Create sockets... */
 	umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
+	if (opt_bench == BENCH_RXDROP || opt_bench == BENCH_L2FWD) {
+		rx = true;
+		xsk_populate_fill_ring(umem);
+	}
+	if (opt_bench == BENCH_L2FWD || opt_bench == BENCH_TXONLY)
+		tx = true;
 	for (i = 0; i < opt_num_xsks; i++)
-		xsks[num_socks++] = xsk_configure_socket(umem);
+		xsks[num_socks++] = xsk_configure_socket(umem, rx, tx);
 
-	for (i = 0; i < NUM_FRAMES; i++)
-		gen_eth_frame(umem, i * opt_xsk_frame_size);
+	if (opt_bench == BENCH_TXONLY)
+		for (i = 0; i < NUM_FRAMES; i++)
+			gen_eth_frame(umem, i * opt_xsk_frame_size);
 
 	if (opt_num_xsks > 1 && opt_bench != BENCH_TXONLY)
 		enter_xsks_into_map(obj);