[02/11] ixgbe: simplify Rx buffer recycle

Message ID 20190620090958.2135-3-kevin.laatz@intel.com
State Awaiting Upstream
Series XDP unaligned chunk placement support

Commit Message

Laatz, Kevin June 20, 2019, 9:09 a.m. UTC
Currently, the dma, addr and handle are modified when we reuse Rx buffers
in zero-copy mode. However, this is not required as the inputs to the
function are copies, not the original values themselves. As we use the
copies within the function, we can use the original 'obi' values
directly without having to mask and add the headroom.

Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
---
 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)
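
For context, the masking and headroom arithmetic being removed is a no-op for aligned umem chunks: the values stored in 'obi' already point at chunk_base + headroom, so masking with chunk_mask and re-adding the headroom just reproduces the value that was already there, and a plain copy is equivalent. Below is a minimal stand-alone sketch of that arithmetic; the chunk size, headroom value, and variable names are illustrative only, not the driver's actual umem configuration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only: a 2K aligned umem chunk with some headroom. */
	const uint64_t chunk_size = 2048;
	const uint64_t chunk_mask = ~(chunk_size - 1);	/* clears the intra-chunk offset */
	const uint64_t headroom = 256;			/* stand-in for umem headroom + XDP_PACKET_HEADROOM */

	/* What the old buffer ('obi') already stores: chunk base plus headroom. */
	uint64_t obi_dma = 5 * chunk_size + headroom;

	/* The removed code path: mask back to the chunk base, then re-add the headroom... */
	uint64_t recomputed = (obi_dma & chunk_mask) + headroom;

	/* ...which yields the value we started with, so copying obi->dma directly is equivalent. */
	assert(recomputed == obi_dma);
	printf("recomputed=%#llx obi_dma=%#llx\n",
	       (unsigned long long)recomputed, (unsigned long long)obi_dma);
	return 0;
}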

Comments

Björn Töpel June 24, 2019, 2:30 p.m. UTC | #1
On Thu, 20 Jun 2019 at 19:25, Kevin Laatz <kevin.laatz@intel.com> wrote:
>
> Currently, the dma, addr and handle are modified when we reuse Rx buffers
> in zero-copy mode. However, this is not required as the inputs to the
> function are copies, not the original values themselves. As we use the
> copies within the function, we can use the original 'obi' values
> directly without having to mask and add the headroom.
>
> Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>

Acked-by: Björn Töpel <bjorn.topel@intel.com>



Patch

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index bfe95ce0bd7f..49536adafe8e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -251,8 +251,6 @@ ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
 static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
 				     struct ixgbe_rx_buffer *obi)
 {
-	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
-	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
 	u16 nta = rx_ring->next_to_alloc;
 	struct ixgbe_rx_buffer *nbi;
 
@@ -262,14 +260,9 @@ static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
 	/* transfer page from old buffer to new buffer */
-	nbi->dma = obi->dma & mask;
-	nbi->dma += hr;
-
-	nbi->addr = (void *)((unsigned long)obi->addr & mask);
-	nbi->addr += hr;
-
-	nbi->handle = obi->handle & mask;
-	nbi->handle += rx_ring->xsk_umem->headroom;
+	nbi->dma = obi->dma;
+	nbi->addr = obi->addr;
+	nbi->handle = obi->handle;
 
 	obi->addr = NULL;
 	obi->skb = NULL;