
[iwl-next,v3,1/5] libeth: pass Rx queue index to PP when creating a fill queue

Message ID 20260224174618.2780516-2-aleksander.lobakin@intel.com
State Under Review
Delegated to: Anthony Nguyen
Series: ice: add support for devmem/io_uring Rx and Tx

Commit Message

Alexander Lobakin Feb. 24, 2026, 5:46 p.m. UTC
page_pool_create() recently started accepting an optional stack index of
the Rx queue for which the pool is created. It can then be used on the
control path for things like memory providers.
Add the same field to libeth_fq and pass the index from all the drivers
using libeth for managing Rx, to simplify implementing MP support later.
idpf has one libeth_fq per buffer/fill queue and each Rx queue has two
fill queues, but since fill queues can never be shared, we can store the
corresponding Rx queue index there during initialization and pass it to
libeth.

Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 drivers/net/ethernet/intel/idpf/idpf_txrx.h |  2 ++
 include/net/libeth/rx.h                     |  2 ++
 drivers/net/ethernet/intel/iavf/iavf_txrx.c |  1 +
 drivers/net/ethernet/intel/ice/ice_base.c   |  2 ++
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 13 +++++++++++++
 drivers/net/ethernet/intel/libeth/rx.c      |  1 +
 6 files changed, 21 insertions(+)
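
As an illustration, here is a minimal sketch of the flow this patch sets up
(struct my_rxq and my_rxq_pp_create() are hypothetical stand-ins for the
per-driver code changed in the hunks below; only libeth_fq::idx,
libeth_rx_fq_create() and page_pool_params::queue_idx come from the patch
itself):

#include <linux/numa.h>
#include <net/libeth/rx.h>

/* Hypothetical driver Rx queue, standing in for iavf_ring / ice_rx_ring /
 * idpf_buf_queue from the hunks below.
 */
struct my_rxq {
	struct page_pool	*pp;
	u32			desc_count;
	u32			q_index;	/* stack index of this Rx queue */
};

static int my_rxq_pp_create(struct my_rxq *rxq, struct napi_struct *napi)
{
	struct libeth_fq fq = {
		.count	= rxq->desc_count,
		.nid	= NUMA_NO_NODE,
		.idx	= rxq->q_index,		/* new field added by this patch */
	};
	int ret;

	/* libeth_rx_fq_create() now forwards fq.idx as
	 * page_pool_params::queue_idx, which memory providers can later
	 * use on the control path.
	 */
	ret = libeth_rx_fq_create(&fq, napi);
	if (ret)
		return ret;

	rxq->pp = fq.pp;

	return 0;
}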

Comments

Paul Menzel Feb. 24, 2026, 6:53 p.m. UTC | #1
Dear Alexander,


Thank you for your patch.

Am 24.02.26 um 18:46 schrieb Alexander Lobakin:
> page_pool_create() recently started accepting an optional stack index of
> the Rx queue for which the pool is created. It can then be used on the
> control path for things like memory providers.
> Add the same field to libeth_fq and pass the index from all the drivers
> using libeth for managing Rx, to simplify implementing MP support later.
> idpf has one libeth_fq per buffer/fill queue and each Rx queue has two
> fill queues, but since fill queues can never be shared, we can store the
> corresponding Rx queue index there during initialization and pass it to
> libeth.
> 
> Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
> Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> ---
>   drivers/net/ethernet/intel/idpf/idpf_txrx.h |  2 ++
>   include/net/libeth/rx.h                     |  2 ++
>   drivers/net/ethernet/intel/iavf/iavf_txrx.c |  1 +
>   drivers/net/ethernet/intel/ice/ice_base.c   |  2 ++
>   drivers/net/ethernet/intel/idpf/idpf_txrx.c | 13 +++++++++++++
>   drivers/net/ethernet/intel/libeth/rx.c      |  1 +
>   6 files changed, 21 insertions(+)
> 
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
> index 4be5b3b6d3ed..a0d92adf11c4 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
> @@ -748,6 +748,7 @@ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
>    * @size: Length of descriptor ring in bytes
>    * @dma: Physical address of ring
>    * @q_vector: Backreference to associated vector
> + * @rxq_idx: stack index of the corresponding Rx queue
>    * @rx_buffer_low_watermark: RX buffer low watermark
>    * @rx_hbuf_size: Header buffer size
>    * @rx_buf_size: Buffer size
> @@ -791,6 +792,7 @@ struct idpf_buf_queue {
>   	dma_addr_t dma;
>   
>   	struct idpf_q_vector *q_vector;
> +	u16 rxq_idx;
>   
>   	u16 rx_buffer_low_watermark;
>   	u16 rx_hbuf_size;
> diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
> index 5d991404845e..3b3d7acd13c9 100644
> --- a/include/net/libeth/rx.h
> +++ b/include/net/libeth/rx.h
> @@ -71,6 +71,7 @@ enum libeth_fqe_type {
>    * @xdp: flag indicating whether XDP is enabled
>    * @buf_len: HW-writeable length per each buffer
>    * @nid: ID of the closest NUMA node with memory
> + * @idx: stack index of the corresponding Rx queue
>    */
>   struct libeth_fq {
>   	struct_group_tagged(libeth_fq_fp, fp,
> @@ -88,6 +89,7 @@ struct libeth_fq {
>   
>   	u32			buf_len;
>   	int			nid;
> +	u32			idx;

The types above and here differ (u16 vs u32), despite the
description being the same. Could you enlighten me why, and maybe add it
to the commit message?


Kind regards,

Paul


>   };
>   
>   int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi);
> diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
> index 363c42bf3dcf..d3c68659162b 100644
> --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
> +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
> @@ -771,6 +771,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
>   		.count		= rx_ring->count,
>   		.buf_len	= LIBIE_MAX_RX_BUF_LEN,
>   		.nid		= NUMA_NO_NODE,
> +		.idx		= rx_ring->queue_index,
>   	};
>   	int ret;
>   
> diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
> index afbff8aa9ceb..1b7d10fad4f2 100644
> --- a/drivers/net/ethernet/intel/ice/ice_base.c
> +++ b/drivers/net/ethernet/intel/ice/ice_base.c
> @@ -607,6 +607,7 @@ static int ice_rxq_pp_create(struct ice_rx_ring *rq)
>   	struct libeth_fq fq = {
>   		.count		= rq->count,
>   		.nid		= NUMA_NO_NODE,
> +		.idx		= rq->q_index,
>   		.hsplit		= rq->vsi->hsplit,
>   		.xdp		= ice_is_xdp_ena_vsi(rq->vsi),
>   		.buf_len	= LIBIE_MAX_RX_BUF_LEN,
> @@ -629,6 +630,7 @@ static int ice_rxq_pp_create(struct ice_rx_ring *rq)
>   		.count		= rq->count,
>   		.type		= LIBETH_FQE_HDR,
>   		.nid		= NUMA_NO_NODE,
> +		.idx		= rq->q_index,
>   		.xdp		= ice_is_xdp_ena_vsi(rq->vsi),
>   	};
>   
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> index 376050308b06..36e2050dbb04 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> @@ -558,6 +558,7 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
>   		.type	= LIBETH_FQE_HDR,
>   		.xdp	= idpf_xdp_enabled(bufq->q_vector->vport),
>   		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
> +		.idx	= bufq->rxq_idx,
>   	};
>   	int ret;
>   
> @@ -700,6 +701,7 @@ static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
>   		.type		= LIBETH_FQE_MTU,
>   		.buf_len	= IDPF_RX_MAX_BUF_SZ,
>   		.nid		= idpf_q_vector_to_mem(rxq->q_vector),
> +		.idx		= rxq->idx,
>   	};
>   	int ret;
>   
> @@ -760,6 +762,7 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
>   		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
>   		.xdp		= idpf_xdp_enabled(bufq->q_vector->vport),
>   		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
> +		.idx		= bufq->rxq_idx,
>   	};
>   	int ret;
>   
> @@ -1919,6 +1922,16 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport,
>   							LIBETH_RX_LL_LEN;
>   			idpf_rxq_set_descids(rsrc, q);
>   		}
> +
> +		if (!idpf_is_queue_model_split(rsrc->rxq_model))
> +			continue;
> +
> +		for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
> +			struct idpf_buf_queue *bufq;
> +
> +			bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
> +			bufq->rxq_idx = rx_qgrp->splitq.rxq_sets[0]->rxq.idx;
> +		}
>   	}
>   
>   err_alloc:
> diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
> index 62521a1f4ec9..8874b714cdcc 100644
> --- a/drivers/net/ethernet/intel/libeth/rx.c
> +++ b/drivers/net/ethernet/intel/libeth/rx.c
> @@ -156,6 +156,7 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
>   		.order		= LIBETH_RX_PAGE_ORDER,
>   		.pool_size	= fq->count,
>   		.nid		= fq->nid,
> +		.queue_idx	= fq->idx,
>   		.dev		= napi->dev->dev.parent,
>   		.netdev		= napi->dev,
>   		.napi		= napi,
Alexander Lobakin March 3, 2026, 3:42 p.m. UTC | #2
From: Paul Menzel <pmenzel@molgen.mpg.de>
Date: Tue, 24 Feb 2026 19:53:11 +0100

> Dear Alexander,
> 
> 
> Thank you for your patch.
> 
> Am 24.02.26 um 18:46 schrieb Alexander Lobakin:
>> page_pool_create() recently started accepting an optional stack index of
>> the Rx queue for which the pool is created. It can then be used on the
>> control path for things like memory providers.
>> Add the same field to libeth_fq and pass the index from all the drivers
>> using libeth for managing Rx, to simplify implementing MP support later.
>> idpf has one libeth_fq per buffer/fill queue and each Rx queue has two
>> fill queues, but since fill queues can never be shared, we can store the
>> corresponding Rx queue index there during initialization and pass it to
>> libeth.
>>
>> Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
>> Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
>> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>

[...]

>> diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
>> index 5d991404845e..3b3d7acd13c9 100644
>> --- a/include/net/libeth/rx.h
>> +++ b/include/net/libeth/rx.h
>> @@ -71,6 +71,7 @@ enum libeth_fqe_type {
>>    * @xdp: flag indicating whether XDP is enabled
>>    * @buf_len: HW-writeable length per each buffer
>>    * @nid: ID of the closest NUMA node with memory
>> + * @idx: stack index of the corresponding Rx queue
>>    */
>>   struct libeth_fq {
>>       struct_group_tagged(libeth_fq_fp, fp,
>> @@ -88,6 +89,7 @@ struct libeth_fq {
>>         u32            buf_len;
>>       int            nid;
>> +    u32            idx;
> 
> The types above and here differ (u16 vs u32), despite the
> description being the same. Could you enlighten me why, and maybe add it
> to the commit message?

The idpf queue index can never exceed U16_MAX, and a u16 field packs
nicely with the other u16 fields there. libeth is more generic, and in
general I prefer 4+ byte fields, hence u32.
I don't think it's particularly important.

> 
> 
> Kind regards,
> 
> Paul
Thanks,
Olek
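
To make the sizing argument above concrete, here is a toy sketch (not from
the patch; the struct only mirrors the idpf_buf_queue excerpt quoted
earlier, and the assertion is merely illustrative): two adjacent u16
members occupy exactly the space of one u32, so the driver-side index can
stay 16-bit while the generic libeth_fq uses a plain 32-bit field.

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct toy_bufq {
	void	*q_vector;			/* stand-in for the pointer above */
	u16	rxq_idx;			/* new u16 joins the run of u16s */
	u16	rx_buffer_low_watermark;
	u16	rx_hbuf_size;
};

/* rxq_idx and rx_buffer_low_watermark together span one u32 worth of space */
static_assert(offsetof(struct toy_bufq, rx_hbuf_size) -
	      offsetof(struct toy_bufq, rxq_idx) == sizeof(u32));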

Patch

diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 4be5b3b6d3ed..a0d92adf11c4 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -748,6 +748,7 @@  libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
  * @size: Length of descriptor ring in bytes
  * @dma: Physical address of ring
  * @q_vector: Backreference to associated vector
+ * @rxq_idx: stack index of the corresponding Rx queue
  * @rx_buffer_low_watermark: RX buffer low watermark
  * @rx_hbuf_size: Header buffer size
  * @rx_buf_size: Buffer size
@@ -791,6 +792,7 @@  struct idpf_buf_queue {
 	dma_addr_t dma;
 
 	struct idpf_q_vector *q_vector;
+	u16 rxq_idx;
 
 	u16 rx_buffer_low_watermark;
 	u16 rx_hbuf_size;
diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
index 5d991404845e..3b3d7acd13c9 100644
--- a/include/net/libeth/rx.h
+++ b/include/net/libeth/rx.h
@@ -71,6 +71,7 @@  enum libeth_fqe_type {
  * @xdp: flag indicating whether XDP is enabled
  * @buf_len: HW-writeable length per each buffer
  * @nid: ID of the closest NUMA node with memory
+ * @idx: stack index of the corresponding Rx queue
  */
 struct libeth_fq {
 	struct_group_tagged(libeth_fq_fp, fp,
@@ -88,6 +89,7 @@  struct libeth_fq {
 
 	u32			buf_len;
 	int			nid;
+	u32			idx;
 };
 
 int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 363c42bf3dcf..d3c68659162b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -771,6 +771,7 @@  int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
 		.count		= rx_ring->count,
 		.buf_len	= LIBIE_MAX_RX_BUF_LEN,
 		.nid		= NUMA_NO_NODE,
+		.idx		= rx_ring->queue_index,
 	};
 	int ret;
 
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index afbff8aa9ceb..1b7d10fad4f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -607,6 +607,7 @@  static int ice_rxq_pp_create(struct ice_rx_ring *rq)
 	struct libeth_fq fq = {
 		.count		= rq->count,
 		.nid		= NUMA_NO_NODE,
+		.idx		= rq->q_index,
 		.hsplit		= rq->vsi->hsplit,
 		.xdp		= ice_is_xdp_ena_vsi(rq->vsi),
 		.buf_len	= LIBIE_MAX_RX_BUF_LEN,
@@ -629,6 +630,7 @@  static int ice_rxq_pp_create(struct ice_rx_ring *rq)
 		.count		= rq->count,
 		.type		= LIBETH_FQE_HDR,
 		.nid		= NUMA_NO_NODE,
+		.idx		= rq->q_index,
 		.xdp		= ice_is_xdp_ena_vsi(rq->vsi),
 	};
 
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 376050308b06..36e2050dbb04 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -558,6 +558,7 @@  static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
 		.type	= LIBETH_FQE_HDR,
 		.xdp	= idpf_xdp_enabled(bufq->q_vector->vport),
 		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
+		.idx	= bufq->rxq_idx,
 	};
 	int ret;
 
@@ -700,6 +701,7 @@  static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
 		.type		= LIBETH_FQE_MTU,
 		.buf_len	= IDPF_RX_MAX_BUF_SZ,
 		.nid		= idpf_q_vector_to_mem(rxq->q_vector),
+		.idx		= rxq->idx,
 	};
 	int ret;
 
@@ -760,6 +762,7 @@  static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
 		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
 		.xdp		= idpf_xdp_enabled(bufq->q_vector->vport),
 		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
+		.idx		= bufq->rxq_idx,
 	};
 	int ret;
 
@@ -1919,6 +1922,16 @@  static int idpf_rxq_group_alloc(struct idpf_vport *vport,
 							LIBETH_RX_LL_LEN;
 			idpf_rxq_set_descids(rsrc, q);
 		}
+
+		if (!idpf_is_queue_model_split(rsrc->rxq_model))
+			continue;
+
+		for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
+			struct idpf_buf_queue *bufq;
+
+			bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
+			bufq->rxq_idx = rx_qgrp->splitq.rxq_sets[0]->rxq.idx;
+		}
 	}
 
 err_alloc:
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 62521a1f4ec9..8874b714cdcc 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -156,6 +156,7 @@  int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
 		.order		= LIBETH_RX_PAGE_ORDER,
 		.pool_size	= fq->count,
 		.nid		= fq->nid,
+		.queue_idx	= fq->idx,
 		.dev		= napi->dev->dev.parent,
 		.netdev		= napi->dev,
 		.napi		= napi,