[iwl-next,v3,2/5] libeth: handle creating pools with unreadable buffers

Message ID: 20260224174618.2780516-3-aleksander.lobakin@intel.com
State: Under Review
Delegated to: Anthony Nguyen
Series: ice: add support for devmem/io_uring Rx and Tx

Commit Message

Alexander Lobakin Feb. 24, 2026, 5:46 p.m. UTC
libeth has been using netmems for quite some time already, so in
order to support unreadable frags / memory providers, it only needs
to set PP_FLAG_ALLOW_UNREADABLE_NETMEM when needed.
Also add a couple of sanity checks to make sure the driver didn't
mess up the configuration options and, when an MP is installed,
always return a truesize equal to PAGE_SIZE, so that
libeth_rx_alloc() will never try to allocate frags. Memory providers
manage buffers on their own and expect 1:1 buffer / HW Rx descriptor
association.

Bonus: mention in the libeth_sqe_type description that
LIBETH_SQE_EMPTY should also be used for netmem Tx SQEs -- they
don't need DMA unmapping.

Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 include/net/libeth/tx.h                |  2 +-
 drivers/net/ethernet/intel/libeth/rx.c | 42 ++++++++++++++++++++++++++
 2 files changed, 43 insertions(+), 1 deletion(-)
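
To illustrate the parameters the new checks act on, here is a minimal
sketch of a payload-queue FQ setup they would accept once a memory
provider is bound to the queue. Only ->type, ->hsplit and ->xdp are
taken from this patch; LIBETH_FQE_MTU, the surrounding netdev/NAPI
wiring and the error handling are assumptions for the example:

/* Illustrative only -- not from this series. */
struct libeth_fq payload_fq = {
	.type	= LIBETH_FQE_MTU,	/* payload pool, not LIBETH_FQE_HDR */
	.hsplit	= true,			/* a ZC payload pool must use hsplit */
	.xdp	= false,		/* XDP + unreadable frags is rejected */
};
int err;

err = libeth_rx_fq_create(&payload_fq, napi);
if (err)		/* -EINVAL when the combination above is invalid */
	return err;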

Comments

Tantilov, Emil S March 5, 2026, 10:04 p.m. UTC | #1
On 2/24/2026 9:46 AM, Alexander Lobakin wrote:
> libeth has been using netmems for quite some time already, so in
> order to support unreadable frags / memory providers, it only needs
> to set PP_FLAG_ALLOW_UNREADABLE_NETMEM when needed.
> Also add a couple of sanity checks to make sure the driver didn't
> mess up the configuration options and, when an MP is installed,
> always return a truesize equal to PAGE_SIZE, so that
> libeth_rx_alloc() will never try to allocate frags. Memory providers
> manage buffers on their own and expect 1:1 buffer / HW Rx descriptor
> association.
> 
> Bonus: mention in the libeth_sqe_type description that
> LIBETH_SQE_EMPTY should also be used for netmem Tx SQEs -- they
> don't need DMA unmapping.
> 
> Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
> Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> ---
>   include/net/libeth/tx.h                |  2 +-
>   drivers/net/ethernet/intel/libeth/rx.c | 42 ++++++++++++++++++++++++++
>   2 files changed, 43 insertions(+), 1 deletion(-)
> 
> diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
> index c3db5c6f1641..a66fc2b3a114 100644
> --- a/include/net/libeth/tx.h
> +++ b/include/net/libeth/tx.h
> @@ -12,7 +12,7 @@
>   
>   /**
>    * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
> - * @LIBETH_SQE_EMPTY: unused/empty OR XDP_TX/XSk frame, no action required
> + * @LIBETH_SQE_EMPTY: empty OR netmem/XDP_TX/XSk frame, no action required
>    * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
>    * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
>    * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
> diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
> index 8874b714cdcc..11e6e8f353ef 100644
> --- a/drivers/net/ethernet/intel/libeth/rx.c
> +++ b/drivers/net/ethernet/intel/libeth/rx.c
> @@ -6,6 +6,7 @@
>   #include <linux/export.h>
>   
>   #include <net/libeth/rx.h>
> +#include <net/netdev_queues.h>
>   
>   /* Rx buffer management */
>   
> @@ -139,9 +140,47 @@ static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
>   	fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
>   	fq->truesize = fq->buf_len;
>   
> +	/*
> +	 * Allow frags only for kernel pages. `fq->truesize == pp->max_len`
> +	 * will always fall back to regular page_pool_alloc_netmems()
> +	 * regardless of the MTU / FQ buffer size.
> +	 */
> +	if (pp->flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM)
> +		fq->truesize = pp->max_len;
> +
>   	return true;
>   }
>   
> +/**
> + * libeth_rx_page_pool_check_unread - check input params for unreadable MPs
> + * @fq: buffer queue to check
> + * @pp: &page_pool_params for the queue
> + *
> + * Make sure we don't create an invalid pool with full-frame unreadable
> + * buffers, bidirectional unreadable buffers and the like, and configure the
> + * ZC payload pool accordingly.
> + *
> + * Return: true on success, false on invalid input params.
> + */
> +static bool libeth_rx_page_pool_check_unread(const struct libeth_fq *fq,
> +					     struct page_pool_params *pp)
> +{
> +	if (!netif_rxq_has_unreadable_mp(pp->netdev, pp->queue_idx))
> +		return true;

This is causing a crash on IDPF:

[  420.570632] BUG: kernel NULL pointer dereference, address: 
00000000000000e8
[  420.570684] #PF: supervisor read access in kernel mode
[  420.570712] #PF: error_code(0x0000) - not-present page
[  420.570739] PGD 0
[  420.570757] Oops: Oops: 0000 [#1] SMP NOPTI
[  420.570784] CPU: 35 UID: 0 PID: 1058 Comm: kworker/u258:8 Kdump: 
loaded Tainted: G S         OE       7.0.0-rc1-next-devq-030526+ #34 
PREEMPT(full)
[  420.570844] Tainted: [S]=CPU_OUT_OF_SPEC, [O]=OOT_MODULE, 
[E]=UNSIGNED_MODULE
[  420.570872] Hardware name: Intel Corporation 
M50CYP2SBSTD/M50CYP2SBSTD, BIOS SE5C6200.86B.0027.P10.2201070222 01/07/2022
[  420.570912] Workqueue: idpf-0000:83:00.0-vc_event idpf_vc_event_task 
[idpf]
[  420.570967] RIP: 0010:netif_rxq_has_unreadable_mp+0xf/0x30
[  420.571004] Code: 2e 0f 1f 84 00 00 00 00 00 66 90 90 90 90 90 90 90 
90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa 0f 1f 44 00 00 89 f6 48 c1 e6 
08 <48> 03 b7 e8 00 00 00 48 83 be c8 00 00 00 00 0f 95 c0 e9 8a 25 2b
[  420.571078] RSP: 0018:ff6bf03aa83e7bd8 EFLAGS: 00010246
[  420.571109] RAX: ff3579f9888ed0d0 RBX: ff6bf03aa83e7c78 RCX: 
0000000000000000
[  420.571145] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 
0000000000000000
[  420.571178] RBP: ff6bf03aa83e7be0 R08: 0000000000000040 R09: 
000000004b1a4093
[  420.571213] R10: 0000000000000003 R11: ff3579f9bed90ee0 R12: 
ff3579f9d58c6060
[  420.571247] R13: ff6bf03aa83e7d88 R14: 0000000000000001 R15: 
ff3579f9d58c6050
[  420.571281] FS:  0000000000000000(0000) GS:ff357a195e9db000(0000) 
knlGS:0000000000000000
[  420.571320] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  420.571350] CR2: 00000000000000e8 CR3: 00000023b2e2a006 CR4: 
0000000000773ef0
[  420.571384] PKRU: 55555554
[  420.571402] Call Trace:
[  420.571419]  <TASK>
[  420.571436]  libeth_rx_fq_create+0x7c/0x380 [libeth]
[  420.571479]  libie_ctlq_init+0x304/0x460 [libie_cp]
[  420.571516]  libie_ctlq_xn_init+0x29/0x230 [libie_cp]
[  420.571550]  idpf_init_dflt_mbx+0xa7/0x170 [idpf]
[  420.571611]  idpf_vc_event_task+0x15d/0x2f0 [idpf]
[  420.571655]  process_one_work+0x226/0x730
[  420.571699]  worker_thread+0x19e/0x340
[  420.571729]  ? __pfx_worker_thread+0x10/0x10
[  420.571760]  kthread+0xf4/0x130
[  420.571785]  ? __pfx_kthread+0x10/0x10
[  420.571813]  ret_from_fork+0x32c/0x410
[  420.571844]  ? __pfx_kthread+0x10/0x10
[  420.571871]  ret_from_fork_asm+0x1a/0x30
[  420.571909]  </TASK>

The driver will call idpf_init_dflt_mbx() before the netdevs are created.

Thanks,
Emil

> +
> +	/* For now, the core stack doesn't allow XDP with unreadable frags */
> +	if (fq->xdp)
> +		return false;
> +
> +	/* It should be either a header pool or a ZC payload pool */
> +	if (fq->type == LIBETH_FQE_HDR)
> +		return !fq->hsplit;
> +
> +	pp->flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
> +
> +	return fq->hsplit;
> +}
> +
>   /**
>    * libeth_rx_fq_create - create a PP with the default libeth settings
>    * @fq: buffer queue struct to fill
> @@ -165,6 +204,9 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
>   	struct page_pool *pool;
>   	int ret;
>   
> +	if (!libeth_rx_page_pool_check_unread(fq, &pp))
> +		return -EINVAL;
> +
>   	pp.dma_dir = fq->xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
>   
>   	if (!fq->hsplit)
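
For reference, the faulting access is netif_rxq_has_unreadable_mp()
dereferencing pp->netdev, which is NULL for the control-queue pools
idpf creates before any netdev exists. A minimal sketch of one
possible guard, assuming a pool without a netdev can never have an MP
bound to it (illustrative only, not the resolution reached below):

static bool libeth_rx_page_pool_check_unread(const struct libeth_fq *fq,
					     struct page_pool_params *pp)
{
	/* Hypothetical early exit: a pool created with no netdev cannot
	 * have an unreadable memory provider bound to it.
	 */
	if (!pp->netdev)
		return true;

	if (!netif_rxq_has_unreadable_mp(pp->netdev, pp->queue_idx))
		return true;

	/* ... rest as in the patch ... */
}
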
Alexander Lobakin March 6, 2026, 11:57 a.m. UTC | #2
From: Tantilov, Emil S <emil.s.tantilov@intel.com>
Date: Thu, 5 Mar 2026 14:04:52 -0800

> 
> 
> On 2/24/2026 9:46 AM, Alexander Lobakin wrote:
>> libeth has been using netmems for quite some time already, so in
>> order to support unreadable frags / memory providers, it only needs
>> to set PP_FLAG_ALLOW_UNREADABLE_NETMEM when needed.
>> Also add a couple of sanity checks to make sure the driver didn't
>> mess up the configuration options and, when an MP is installed,
>> always return a truesize equal to PAGE_SIZE, so that
>> libeth_rx_alloc() will never try to allocate frags. Memory providers
>> manage buffers on their own and expect 1:1 buffer / HW Rx descriptor
>> association.
>>
>> Bonus: mention in the libeth_sqe_type description that
>> LIBETH_SQE_EMPTY should also be used for netmem Tx SQEs -- they
>> don't need DMA unmapping.
>>
>> Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
>> Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
>> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
>> ---
>>   include/net/libeth/tx.h                |  2 +-
>>   drivers/net/ethernet/intel/libeth/rx.c | 42 ++++++++++++++++++++++++++
>>   2 files changed, 43 insertions(+), 1 deletion(-)
>>
>> diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
>> index c3db5c6f1641..a66fc2b3a114 100644
>> --- a/include/net/libeth/tx.h
>> +++ b/include/net/libeth/tx.h
>> @@ -12,7 +12,7 @@
>>     /**
>>    * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
>> - * @LIBETH_SQE_EMPTY: unused/empty OR XDP_TX/XSk frame, no action
>> required
>> + * @LIBETH_SQE_EMPTY: empty OR netmem/XDP_TX/XSk frame, no action
>> required
>>    * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action
>> required
>>    * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
>>    * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
>> diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/
>> ethernet/intel/libeth/rx.c
>> index 8874b714cdcc..11e6e8f353ef 100644
>> --- a/drivers/net/ethernet/intel/libeth/rx.c
>> +++ b/drivers/net/ethernet/intel/libeth/rx.c
>> @@ -6,6 +6,7 @@
>>   #include <linux/export.h>
>>     #include <net/libeth/rx.h>
>> +#include <net/netdev_queues.h>
>>     /* Rx buffer management */
>>   @@ -139,9 +140,47 @@ static bool
>> libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
>>       fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
>>       fq->truesize = fq->buf_len;
>>   +    /*
>> +     * Allow frags only for kernel pages. `fq->truesize == pp->max_len`
>> +     * will always fall back to regular page_pool_alloc_netmems()
>> +     * regardless of the MTU / FQ buffer size.
>> +     */
>> +    if (pp->flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM)
>> +        fq->truesize = pp->max_len;
>> +
>>       return true;
>>   }
>>   +/**
>> + * libeth_rx_page_pool_check_unread - check input params for
>> unreadable MPs
>> + * @fq: buffer queue to check
>> + * @pp: &page_pool_params for the queue
>> + *
>> + * Make sure we don't create an invalid pool with full-frame unreadable
>> + * buffers, bidirectional unreadable buffers and the like, and configure the
>> + * ZC payload pool accordingly.
>> + *
>> + * Return: true on success, false on invalid input params.
>> + */
>> +static bool libeth_rx_page_pool_check_unread(const struct libeth_fq *fq,
>> +                         struct page_pool_params *pp)
>> +{
>> +    if (!netif_rxq_has_unreadable_mp(pp->netdev, pp->queue_idx))
>> +        return true;
> 
> This is causing a crash on IDPF:
> 
> [  420.570632] BUG: kernel NULL pointer dereference, address:
> 00000000000000e8
> [  420.570684] #PF: supervisor read access in kernel mode
> [  420.570712] #PF: error_code(0x0000) - not-present page
> [  420.570739] PGD 0
> [  420.570757] Oops: Oops: 0000 [#1] SMP NOPTI
> [  420.570784] CPU: 35 UID: 0 PID: 1058 Comm: kworker/u258:8 Kdump:
> loaded Tainted: G S         OE       7.0.0-rc1-next-devq-030526+ #34
> PREEMPT(full)
> [  420.570844] Tainted: [S]=CPU_OUT_OF_SPEC, [O]=OOT_MODULE,
> [E]=UNSIGNED_MODULE
> [  420.570872] Hardware name: Intel Corporation M50CYP2SBSTD/
> M50CYP2SBSTD, BIOS SE5C6200.86B.0027.P10.2201070222 01/07/2022
> [  420.570912] Workqueue: idpf-0000:83:00.0-vc_event idpf_vc_event_task
> [idpf]
> [  420.570967] RIP: 0010:netif_rxq_has_unreadable_mp+0xf/0x30
> [  420.571004] Code: 2e 0f 1f 84 00 00 00 00 00 66 90 90 90 90 90 90 90
> 90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa 0f 1f 44 00 00 89 f6 48 c1 e6
> 08 <48> 03 b7 e8 00 00 00 48 83 be c8 00 00 00 00 0f 95 c0 e9 8a 25 2b
> [  420.571078] RSP: 0018:ff6bf03aa83e7bd8 EFLAGS: 00010246
> [  420.571109] RAX: ff3579f9888ed0d0 RBX: ff6bf03aa83e7c78 RCX:
> 0000000000000000
> [  420.571145] RDX: 0000000000000000 RSI: 0000000000000000 RDI:
> 0000000000000000
> [  420.571178] RBP: ff6bf03aa83e7be0 R08: 0000000000000040 R09:
> 000000004b1a4093
> [  420.571213] R10: 0000000000000003 R11: ff3579f9bed90ee0 R12:
> ff3579f9d58c6060
> [  420.571247] R13: ff6bf03aa83e7d88 R14: 0000000000000001 R15:
> ff3579f9d58c6050
> [  420.571281] FS:  0000000000000000(0000) GS:ff357a195e9db000(0000)
> knlGS:0000000000000000
> [  420.571320] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [  420.571350] CR2: 00000000000000e8 CR3: 00000023b2e2a006 CR4:
> 0000000000773ef0
> [  420.571384] PKRU: 55555554
> [  420.571402] Call Trace:
> [  420.571419]  <TASK>
> [  420.571436]  libeth_rx_fq_create+0x7c/0x380 [libeth]
> [  420.571479]  libie_ctlq_init+0x304/0x460 [libie_cp]
> [  420.571516]  libie_ctlq_xn_init+0x29/0x230 [libie_cp]
> [  420.571550]  idpf_init_dflt_mbx+0xa7/0x170 [idpf]
> [  420.571611]  idpf_vc_event_task+0x15d/0x2f0 [idpf]
> [  420.571655]  process_one_work+0x226/0x730
> [  420.571699]  worker_thread+0x19e/0x340
> [  420.571729]  ? __pfx_worker_thread+0x10/0x10
> [  420.571760]  kthread+0xf4/0x130
> [  420.571785]  ? __pfx_kthread+0x10/0x10
> [  420.571813]  ret_from_fork+0x32c/0x410
> [  420.571844]  ? __pfx_kthread+0x10/0x10
> [  420.571871]  ret_from_fork_asm+0x1a/0x30
> [  420.571909]  </TASK>
> 
> The driver will call idpf_init_dflt_mbx() before the netdevs are created.
> 
> Thanks,
> Emil

This series is based on top of the latest net-next, as the PR will be
sent soon. net-next doesn't have libie_ctlq.

For the tree which contains ixd, this will look different, as it
introduces the ability to pass a device instead of a napi_struct. Once
this PR is accepted, we'll rebase the next-queue tree to handle this.

Thanks,
Olek

Patch

diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
index c3db5c6f1641..a66fc2b3a114 100644
--- a/include/net/libeth/tx.h
+++ b/include/net/libeth/tx.h
@@ -12,7 +12,7 @@ 
 
 /**
  * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
- * @LIBETH_SQE_EMPTY: unused/empty OR XDP_TX/XSk frame, no action required
+ * @LIBETH_SQE_EMPTY: empty OR netmem/XDP_TX/XSk frame, no action required
  * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
  * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
  * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 8874b714cdcc..11e6e8f353ef 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -6,6 +6,7 @@ 
 #include <linux/export.h>
 
 #include <net/libeth/rx.h>
+#include <net/netdev_queues.h>
 
 /* Rx buffer management */
 
@@ -139,9 +140,47 @@  static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
 	fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
 	fq->truesize = fq->buf_len;
 
+	/*
+	 * Allow frags only for kernel pages. `fq->truesize == pp->max_len`
+	 * will always fall back to regular page_pool_alloc_netmems()
+	 * regardless of the MTU / FQ buffer size.
+	 */
+	if (pp->flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM)
+		fq->truesize = pp->max_len;
+
 	return true;
 }
 
+/**
+ * libeth_rx_page_pool_check_unread - check input params for unreadable MPs
+ * @fq: buffer queue to check
+ * @pp: &page_pool_params for the queue
+ *
+ * Make sure we don't create an invalid pool with full-frame unreadable
 * buffers, bidirectional unreadable buffers and the like, and configure the
+ * ZC payload pool accordingly.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_check_unread(const struct libeth_fq *fq,
+					     struct page_pool_params *pp)
+{
+	if (!netif_rxq_has_unreadable_mp(pp->netdev, pp->queue_idx))
+		return true;
+
+	/* For now, the core stack doesn't allow XDP with unreadable frags */
+	if (fq->xdp)
+		return false;
+
+	/* It should be either a header pool or a ZC payload pool */
+	if (fq->type == LIBETH_FQE_HDR)
+		return !fq->hsplit;
+
+	pp->flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+
+	return fq->hsplit;
+}
+
 /**
  * libeth_rx_fq_create - create a PP with the default libeth settings
  * @fq: buffer queue struct to fill
@@ -165,6 +204,9 @@  int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
 	struct page_pool *pool;
 	int ret;
 
+	if (!libeth_rx_page_pool_check_unread(fq, &pp))
+		return -EINVAL;
+
 	pp.dma_dir = fq->xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 
 	if (!fq->hsplit)
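
The effect of the fq->truesize = pp->max_len override is easiest to
see from the allocation side. A simplified sketch of the consequence;
the libeth_fq_fp/libeth_fqe layout and the page_pool_dev_alloc_netmem()
call are patterned on the upstream helpers, not taken from this series:

/* Illustrative only: why truesize == pp->max_len disables frags. */
static netmem_ref libeth_rx_alloc_sketch(const struct libeth_fq_fp *fq,
					 u32 i)
{
	struct libeth_fqe *buf = &fq->fqes[i];
	u32 len = fq->truesize;

	/* The page_pool frag path only splits a buffer when the
	 * requested size is smaller than the pool's max_len; with an
	 * MP installed the two are equal, so every call falls back to
	 * a full page_pool_alloc_netmems() -- one MP-managed buffer
	 * per HW Rx descriptor, as the commit message describes.
	 */
	buf->netmem = page_pool_dev_alloc_netmem(fq->pp, &buf->offset, &len);

	return buf->netmem;
}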