
[bpf-next,RFCv2,1/3] xsk: add xsk_umem_consume_tx_virtual.

Message ID 1545181493-8743-2-git-send-email-u9012063@gmail.com
State RFC, archived
Delegated to: BPF Maintainers
Series AF_XDP support for veth.

Commit Message

William Tu Dec. 19, 2018, 1:04 a.m. UTC
Currently xsk_umem_consume_tx expects only physical NICs, so the API
returns a DMA address.  This patch introduces a new function that
returns the virtual address instead, for use when XSK is driven by a
virtual device.

Signed-off-by: William Tu <u9012063@gmail.com>
---
 include/net/xdp_sock.h |  7 +++++++
 net/xdp/xdp_umem.c     |  1 +
 net/xdp/xsk.c          | 21 +++++++++++++++++++--
 3 files changed, 27 insertions(+), 2 deletions(-)
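
For context, here is a minimal, illustrative sketch (not part of this
patch) of how a virtual device's TX path might use the new helper.  The
example_xsk_xmit() and xmit_one() functions are hypothetical; only
xsk_umem_consume_tx_virtual() and xsk_umem_consume_tx_done() come from
the patch itself.

/* Hypothetical virtual-driver TX loop built on the new helper. */
static int example_xsk_xmit(struct xdp_umem *umem)
{
	void *vaddr;
	u32 len;
	int sent = 0;

	while (xsk_umem_consume_tx_virtual(umem, &vaddr, &len)) {
		/* The umem frame is already mapped into kernel virtual
		 * memory, so a virtual device can copy from vaddr (or
		 * build an skb around it) without any DMA mapping.
		 */
		if (xmit_one(vaddr, len) < 0)	/* hypothetical */
			break;
		sent++;
	}

	if (sent)
		xsk_umem_consume_tx_done(umem);
	return sent;
}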

Comments

Björn Töpel Dec. 20, 2018, 7:48 p.m. UTC | #1
On Wed, Dec 19, 2018 at 01:55, William Tu <u9012063@gmail.com> wrote:
>
> Currently xsk_umem_consume_tx expects only physical NICs, so the API
> returns a DMA address.  This patch introduces a new function that
> returns the virtual address instead, for use when XSK is driven by a
> virtual device.
>
> Signed-off-by: William Tu <u9012063@gmail.com>
> ---
>  include/net/xdp_sock.h |  7 +++++++
>  net/xdp/xdp_umem.c     |  1 +
>  net/xdp/xsk.c          | 21 +++++++++++++++++++--
>  3 files changed, 27 insertions(+), 2 deletions(-)
>
> diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
> index 13acb9803a6d..7fefe74f7fb5 100644
> --- a/include/net/xdp_sock.h
> +++ b/include/net/xdp_sock.h
> @@ -81,6 +81,7 @@ u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
>  void xsk_umem_discard_addr(struct xdp_umem *umem);
>  void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
>  bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
> +bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **addr, u32 *len);
>  void xsk_umem_consume_tx_done(struct xdp_umem *umem);
>  struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
>  struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
> @@ -165,6 +166,12 @@ static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
>         return false;
>  }
>
> +static inline bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem,
> +                                              void **vaddr, u32 *len)
> +{
> +       return false;
> +}
> +
>  static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
>  {
>  }
> diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
> index a264cf2accd0..424ae2538f9f 100644
> --- a/net/xdp/xdp_umem.c
> +++ b/net/xdp/xdp_umem.c
> @@ -60,6 +60,7 @@ struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
>
>         return NULL;
>  }
> +EXPORT_SYMBOL(xdp_get_umem_from_qid);
>
>  static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
>  {
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index 07156f43d295..a6ca0c0f0330 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -170,7 +170,8 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
>  }
>  EXPORT_SYMBOL(xsk_umem_consume_tx_done);
>
> -bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
> +static bool __xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
> +                            void **vaddr, u32 *len)
>  {
>         struct xdp_desc desc;
>         struct xdp_sock *xs;
> @@ -183,7 +184,12 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
>                 if (xskq_produce_addr_lazy(umem->cq, desc.addr))
>                         goto out;
>
> -               *dma = xdp_umem_get_dma(umem, desc.addr);
> +               if (dma)
> +                       *dma = xdp_umem_get_dma(umem, desc.addr);
> +
> +               if (vaddr)
> +                       *vaddr = xdp_umem_get_data(umem, desc.addr);
> +

This function is in the fast path, so I'm reluctant to introduce the
branching above. What do you think about something like this instead?

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 80ca48cefc42..458f0977b437 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -170,22 +170,19 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 }
 EXPORT_SYMBOL(xsk_umem_consume_tx_done);

-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+static __always_inline bool __xsk_umem_consume_tx(struct xdp_umem *umem,
+                          struct xdp_desc *desc)
 {
-    struct xdp_desc desc;
     struct xdp_sock *xs;

     rcu_read_lock();
     list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
-        if (!xskq_peek_desc(xs->tx, &desc))
+        if (!xskq_peek_desc(xs->tx, desc))
             continue;

-        if (xskq_produce_addr_lazy(umem->cq, desc.addr))
+        if (xskq_produce_addr_lazy(umem->cq, desc->addr))
             goto out;

-        *dma = xdp_umem_get_dma(umem, desc.addr);
-        *len = desc.len;
-
         xskq_discard_desc(xs->tx);
         rcu_read_unlock();
         return true;
@@ -195,8 +192,35 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
     rcu_read_unlock();
     return false;
 }
+
+bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+{
+    struct xdp_desc desc;
+
+    if (!__xsk_umem_consume_tx(umem, &desc))
+        return false;
+
+    *dma = xdp_umem_get_dma(umem, desc.addr);
+    *len = desc.len;
+
+    return true;
+}
 EXPORT_SYMBOL(xsk_umem_consume_tx);

+bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **vaddr, u32 *len)
+{
+    struct xdp_desc desc;
+
+    if (!__xsk_umem_consume_tx(umem, &desc))
+        return false;
+
+    *vaddr = xdp_umem_get_data(umem, desc.addr);
+    *len = desc.len;
+
+    return true;
+}
+EXPORT_SYMBOL(xsk_umem_consume_tx_virtual);
+
 static int xsk_zc_xmit(struct sock *sk)
 {
     struct xdp_sock *xs = xdp_sk(sk);


Björn



>                 *len = desc.len;
>
>                 xskq_discard_desc(xs->tx);
> @@ -195,8 +201,19 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
>         rcu_read_unlock();
>         return false;
>  }
> +
> +bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
> +{
> +       return __xsk_umem_consume_tx(umem, dma, NULL, len);
> +}
>  EXPORT_SYMBOL(xsk_umem_consume_tx);
>
> +bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **addr, u32 *len)
> +{
> +       return __xsk_umem_consume_tx(umem, NULL, addr, len);
> +}
> +EXPORT_SYMBOL(xsk_umem_consume_tx_virtual);
> +
>  static int xsk_zc_xmit(struct sock *sk)
>  {
>         struct xdp_sock *xs = xdp_sk(sk);
> --
> 2.7.4
>
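
The inline-core pattern Björn proposes above removes the fast-path
branch because each driver knows statically which address type it
needs: the compiler specializes each thin wrapper, so neither call
site pays for a runtime test.  A hedged sketch of the two resulting
call sites (hw_queue_tx() and sw_xmit() are hypothetical driver
functions):

	dma_addr_t dma;
	void *vaddr;
	u32 len;

	/* Physical NIC: hand the frame's DMA address to hardware. */
	while (xsk_umem_consume_tx(umem, &dma, &len))
		hw_queue_tx(dma, len);		/* hypothetical */

	/* Virtual device: the frame stays in kernel memory, so the
	 * virtual address is used to copy or wrap the packet.
	 */
	while (xsk_umem_consume_tx_virtual(umem, &vaddr, &len))
		sw_xmit(vaddr, len);		/* hypothetical */
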
William Tu Dec. 21, 2018, 8:02 p.m. UTC | #2
On Thu, Dec 20, 2018 at 11:48 AM Björn Töpel <bjorn.topel@gmail.com> wrote:
>
> On Wed, Dec 19, 2018 at 01:55, William Tu <u9012063@gmail.com> wrote:
> >
> > Currently xsk_umem_consume_tx expects only physical NICs, so the API
> > returns a DMA address.  This patch introduces a new function that
> > returns the virtual address instead, for use when XSK is driven by a
> > virtual device.
> >
> > Signed-off-by: William Tu <u9012063@gmail.com>
> > ---
> >  include/net/xdp_sock.h |  7 +++++++
> >  net/xdp/xdp_umem.c     |  1 +
> >  net/xdp/xsk.c          | 21 +++++++++++++++++++--
> >  3 files changed, 27 insertions(+), 2 deletions(-)
> >
> > diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
> > index 13acb9803a6d..7fefe74f7fb5 100644
> > --- a/include/net/xdp_sock.h
> > +++ b/include/net/xdp_sock.h
> > @@ -81,6 +81,7 @@ u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
> >  void xsk_umem_discard_addr(struct xdp_umem *umem);
> >  void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
> >  bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
> > +bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **addr, u32 *len);
> >  void xsk_umem_consume_tx_done(struct xdp_umem *umem);
> >  struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
> >  struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
> > @@ -165,6 +166,12 @@ static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
> >         return false;
> >  }
> >
> > +static inline bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem,
> > +                                              void **vaddr, u32 *len)
> > +{
> > +       return false;
> > +}
> > +
> >  static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
> >  {
> >  }
> > diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
> > index a264cf2accd0..424ae2538f9f 100644
> > --- a/net/xdp/xdp_umem.c
> > +++ b/net/xdp/xdp_umem.c
> > @@ -60,6 +60,7 @@ struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
> >
> >         return NULL;
> >  }
> > +EXPORT_SYMBOL(xdp_get_umem_from_qid);
> >
> >  static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
> >  {
> > diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> > index 07156f43d295..a6ca0c0f0330 100644
> > --- a/net/xdp/xsk.c
> > +++ b/net/xdp/xsk.c
> > @@ -170,7 +170,8 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
> >  }
> >  EXPORT_SYMBOL(xsk_umem_consume_tx_done);
> >
> > -bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
> > +static bool __xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
> > +                            void **vaddr, u32 *len)
> >  {
> >         struct xdp_desc desc;
> >         struct xdp_sock *xs;
> > @@ -183,7 +184,12 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
> >                 if (xskq_produce_addr_lazy(umem->cq, desc.addr))
> >                         goto out;
> >
> > -               *dma = xdp_umem_get_dma(umem, desc.addr);
> > +               if (dma)
> > +                       *dma = xdp_umem_get_dma(umem, desc.addr);
> > +
> > +               if (vaddr)
> > +                       *vaddr = xdp_umem_get_data(umem, desc.addr);
> > +
>
> This function is in the fast path, so I'm reluctant to introduce the
> branching above. What do you think about something like this instead?
>
Yes, makes sense. I will merge this into my next patch set.
Thanks
William

> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index 80ca48cefc42..458f0977b437 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -170,22 +170,19 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
>  }
>  EXPORT_SYMBOL(xsk_umem_consume_tx_done);
>
> -bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
> +static __always_inline bool __xsk_umem_consume_tx(struct xdp_umem *umem,
> +                          struct xdp_desc *desc)
>  {
> -    struct xdp_desc desc;
>      struct xdp_sock *xs;
>
>      rcu_read_lock();
>      list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
> -        if (!xskq_peek_desc(xs->tx, &desc))
> +        if (!xskq_peek_desc(xs->tx, desc))
>              continue;
>
> -        if (xskq_produce_addr_lazy(umem->cq, desc.addr))
> +        if (xskq_produce_addr_lazy(umem->cq, desc->addr))
>              goto out;
>
> -        *dma = xdp_umem_get_dma(umem, desc.addr);
> -        *len = desc.len;
> -
>          xskq_discard_desc(xs->tx);
>          rcu_read_unlock();
>          return true;
> @@ -195,8 +192,35 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
>      rcu_read_unlock();
>      return false;
>  }
> +
> +bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
> +{
> +    struct xdp_desc desc;
> +
> +    if (!__xsk_umem_consume_tx(umem, &desc))
> +        return false;
> +
> +    *dma = xdp_umem_get_dma(umem, desc.addr);
> +    *len = desc.len;
> +
> +    return true;
> +}
>  EXPORT_SYMBOL(xsk_umem_consume_tx);
>
> +bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **vaddr, u32 *len)
> +{
> +    struct xdp_desc desc;
> +
> +    if (!__xsk_umem_consume_tx(umem, &desc))
> +        return false;
> +
> +    *vaddr = xdp_umem_get_data(umem, desc.addr);
> +    *len = desc.len;
> +
> +    return true;
> +}
> +EXPORT_SYMBOL(xsk_umem_consume_tx_virtual);
> +
>  static int xsk_zc_xmit(struct sock *sk)
>  {
>      struct xdp_sock *xs = xdp_sk(sk);
>
>
> Björn
>
>
>
> >                 *len = desc.len;
> >
> >                 xskq_discard_desc(xs->tx);
> > @@ -195,8 +201,19 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
> >         rcu_read_unlock();
> >         return false;
> >  }
> > +
> > +bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
> > +{
> > +       return __xsk_umem_consume_tx(umem, dma, NULL, len);
> > +}
> >  EXPORT_SYMBOL(xsk_umem_consume_tx);
> >
> > +bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **addr, u32 *len)
> > +{
> > +       return __xsk_umem_consume_tx(umem, NULL, addr, len);
> > +}
> > +EXPORT_SYMBOL(xsk_umem_consume_tx_virtual);
> > +
> >  static int xsk_zc_xmit(struct sock *sk)
> >  {
> >         struct xdp_sock *xs = xdp_sk(sk);
> > --
> > 2.7.4
> >

Patch

diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 13acb9803a6d..7fefe74f7fb5 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -81,6 +81,7 @@  u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
 void xsk_umem_discard_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
+bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **addr, u32 *len);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
 struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
 struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
@@ -165,6 +166,12 @@  static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
 	return false;
 }
 
+static inline bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem,
+					       void **vaddr, u32 *len)
+{
+	return false;
+}
+
 static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 {
 }
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a264cf2accd0..424ae2538f9f 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -60,6 +60,7 @@  struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
 
 	return NULL;
 }
+EXPORT_SYMBOL(xdp_get_umem_from_qid);
 
 static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
 {
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 07156f43d295..a6ca0c0f0330 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -170,7 +170,8 @@  void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 }
 EXPORT_SYMBOL(xsk_umem_consume_tx_done);
 
-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+static bool __xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
+			     void **vaddr, u32 *len)
 {
 	struct xdp_desc desc;
 	struct xdp_sock *xs;
@@ -183,7 +184,12 @@  bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
 		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
 			goto out;
 
-		*dma = xdp_umem_get_dma(umem, desc.addr);
+		if (dma)
+			*dma = xdp_umem_get_dma(umem, desc.addr);
+
+		if (vaddr)
+			*vaddr = xdp_umem_get_data(umem, desc.addr);
+
 		*len = desc.len;
 
 		xskq_discard_desc(xs->tx);
@@ -195,8 +201,19 @@  bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
 	rcu_read_unlock();
 	return false;
 }
+
+bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+{
+	return __xsk_umem_consume_tx(umem, dma, NULL, len);
+}
 EXPORT_SYMBOL(xsk_umem_consume_tx);
 
+bool xsk_umem_consume_tx_virtual(struct xdp_umem *umem, void **addr, u32 *len)
+{
+	return __xsk_umem_consume_tx(umem, NULL, addr, len);
+}
+EXPORT_SYMBOL(xsk_umem_consume_tx_virtual);
+
 static int xsk_zc_xmit(struct sock *sk)
 {
 	struct xdp_sock *xs = xdp_sk(sk);