
[ovs-dev,branch-2.6] netdev-dpdk: leverage the mempool offload support

Message ID 1490009924-29839-1-git-send-email-hemant.agrawal@nxp.com
State Not Applicable
Delegated to: Darrell Ball
Headers show

Commit Message

Hemant Agrawal March 20, 2017, 11:38 a.m. UTC
DPDK 16.07 introduced support for mempool offload.
rte_pktmbuf_pool_create is the recommended method for creating pktmbuf
pools; buffer pools created with rte_mempool_create may not be backed by
the underlying offloaded mempools.

This patch changes the rte_mempool_create call to use the helper wrapper
"rte_pktmbuf_pool_create" provided by DPDK, so that OVS can leverage
offloaded mempools.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 lib/netdev-dpdk.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)
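
For context, a minimal standalone sketch of the DPDK helper this patch switches to; the pool name, mbuf count, and sizes below are illustrative, not OVS's actual values. rte_pktmbuf_pool_create() takes the per-mbuf private size and data room size directly (in OVS these become sizeof(struct dp_packet) - sizeof(struct rte_mbuf) and MBUF_SIZE(mtu) - sizeof(struct dp_packet)), fills in struct rte_pktmbuf_pool_private, runs rte_pktmbuf_init() on every mbuf, and creates the pool with the platform's default mempool ops, which is what lets an offloaded pool handler back it.

#include <rte_errno.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Illustrative values only; OVS derives these from the MTU and from
 * sizeof(struct dp_packet). */
#define EXAMPLE_NB_MBUF        4096
#define EXAMPLE_CACHE_SIZE     32
#define EXAMPLE_PRIV_SIZE      64   /* multiple of RTE_MBUF_PRIV_ALIGN */
#define EXAMPLE_DATA_ROOM_SIZE RTE_MBUF_DEFAULT_BUF_SIZE

static struct rte_mempool *
example_create_pktmbuf_pool(int socket_id)
{
    struct rte_mempool *mp;

    /* Equivalent of the old rte_mempool_create() + rte_pktmbuf_pool_init()
     * + per-object rte_pktmbuf_init() sequence, but routed through the
     * default mempool ops so a hardware pool handler can be used. */
    mp = rte_pktmbuf_pool_create("example_mp", EXAMPLE_NB_MBUF,
                                 EXAMPLE_CACHE_SIZE, EXAMPLE_PRIV_SIZE,
                                 EXAMPLE_DATA_ROOM_SIZE, socket_id);
    if (!mp) {
        /* On failure rte_errno is set, e.g. ENOMEM; OVS retries with a
         * smaller pool in that case. */
        return NULL;
    }
    return mp;
}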

Comments

Kevin Traynor March 21, 2017, 9 p.m. UTC | #1
On 03/20/2017 11:38 AM, Hemant Agrawal wrote:
> DPDK 16.07 introduced the support for mempool offload support.
> rte_pktmbuf_pool_create is the recommended method for creating pktmbuf
> pools. Buffer pools created with rte_mempool_create may not get offloaded
> to the underlying offloaded mempools.
> 
> This patch, changes the rte_mempool_create to use helper wrapper
> "rte_pktmbuf_pool_create" provided by dpdk, so that it can leverage
> offloaded mempools.

This is changing OVS code to enable new functionality in some cases; I'm
not sure that it's really appropriate as a backport.

> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  lib/netdev-dpdk.c | 20 +++++++++-----------
>  1 file changed, 9 insertions(+), 11 deletions(-)
> 
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
> index 9203de0..8d658ff 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -468,7 +468,6 @@ dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
>      struct dpdk_mp *dmp = NULL;
>      char mp_name[RTE_MEMPOOL_NAMESIZE];
>      unsigned mp_size;
> -    struct rte_pktmbuf_pool_private mbp_priv;
>  
>      LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
>          if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
> @@ -481,9 +480,6 @@ dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
>      dmp->socket_id = socket_id;
>      dmp->mtu = mtu;
>      dmp->refcount = 1;
> -    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
> -    mbp_priv.mbuf_priv_size = sizeof (struct dp_packet)
> -                              - sizeof (struct rte_mbuf);
>      /* XXX: this is a really rough method of provisioning memory.
>       * It's impossible to determine what the exact memory requirements are when
>       * the number of ports and rxqs that utilize a particular mempool can change
> @@ -500,13 +496,13 @@ dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
>                       dmp->mtu, dmp->socket_id, mp_size) < 0) {
>              goto fail;
>          }
> -
> -        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
> -                                     MP_CACHE_SZ,
> -                                     sizeof(struct rte_pktmbuf_pool_private),
> -                                     rte_pktmbuf_pool_init, &mbp_priv,
> -                                     ovs_rte_pktmbuf_init, NULL,
> -                                     socket_id, 0);
> +	dmp->mp = rte_pktmbuf_pool_create(mp_name, mp_size,
> +                                          MP_CACHE_SZ,
> +                                          sizeof (struct dp_packet)
> +                                                 - sizeof (struct rte_mbuf),
> +                                          MBUF_SIZE(mtu)
> +                                                 - sizeof(struct dp_packet),
> +                                          socket_id);
>      } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
>  
>      if (dmp->mp == NULL) {
> @@ -514,6 +510,8 @@ dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
>      } else {
>          VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size );
>      }
> +    /* pktmbuf only prepare the rte_mbuf, prepare the ovs bufs */
> +    rte_mempool_obj_iter(dmp->mp, ovs_rte_pktmbuf_init, NULL);
>  
>      ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
>      return dmp;
>
Darrell Ball March 21, 2017, 10:36 p.m. UTC | #2
On 3/21/17, 2:00 PM, "ovs-dev-bounces@openvswitch.org on behalf of Kevin Traynor" <ovs-dev-bounces@openvswitch.org on behalf of ktraynor@redhat.com> wrote:

    On 03/20/2017 11:38 AM, Hemant Agrawal wrote:
    > DPDK 16.07 introduced the support for mempool offload support.
    > rte_pktmbuf_pool_create is the recommended method for creating pktmbuf
    > pools. Buffer pools created with rte_mempool_create may not get offloaded
    > to the underlying offloaded mempools.
    > 
    > This patch, changes the rte_mempool_create to use helper wrapper
    > "rte_pktmbuf_pool_create" provided by dpdk, so that it can leverage
    > offloaded mempools.
    
    This is changing OVS code to enable new functionality in some cases, I'm
    not sure that it's really appropriate as a backport.

+1
It is not a bug fix, but rather an enhancement and also a cleaner API, so
it would not seem backport-eligible.

    
    > 
    > Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
    > ---
    >  lib/netdev-dpdk.c | 20 +++++++++-----------
    >  1 file changed, 9 insertions(+), 11 deletions(-)
    > 
    > diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
    > index 9203de0..8d658ff 100644
    > --- a/lib/netdev-dpdk.c
    > +++ b/lib/netdev-dpdk.c
    > @@ -468,7 +468,6 @@ dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
    >      struct dpdk_mp *dmp = NULL;
    >      char mp_name[RTE_MEMPOOL_NAMESIZE];
    >      unsigned mp_size;
    > -    struct rte_pktmbuf_pool_private mbp_priv;
    >  
    >      LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
    >          if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
    > @@ -481,9 +480,6 @@ dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
    >      dmp->socket_id = socket_id;
    >      dmp->mtu = mtu;
    >      dmp->refcount = 1;
    > -    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    > -    mbp_priv.mbuf_priv_size = sizeof (struct dp_packet)
    > -                              - sizeof (struct rte_mbuf);
    >      /* XXX: this is a really rough method of provisioning memory.
    >       * It's impossible to determine what the exact memory requirements are when
    >       * the number of ports and rxqs that utilize a particular mempool can change
    > @@ -500,13 +496,13 @@ dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
    >                       dmp->mtu, dmp->socket_id, mp_size) < 0) {
    >              goto fail;
    >          }
    > -
    > -        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
    > -                                     MP_CACHE_SZ,
    > -                                     sizeof(struct rte_pktmbuf_pool_private),
    > -                                     rte_pktmbuf_pool_init, &mbp_priv,
    > -                                     ovs_rte_pktmbuf_init, NULL,
    > -                                     socket_id, 0);
    > +	dmp->mp = rte_pktmbuf_pool_create(mp_name, mp_size,
    > +                                          MP_CACHE_SZ,
    > +                                          sizeof (struct dp_packet)
    > +                                                 - sizeof (struct rte_mbuf),
    > +                                          MBUF_SIZE(mtu)
    > +                                                 - sizeof(struct dp_packet),
    > +                                          socket_id);
    >      } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
    >  
    >      if (dmp->mp == NULL) {
    > @@ -514,6 +510,8 @@ dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
    >      } else {
    >          VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size );
    >      }
    > +    /* pktmbuf only prepare the rte_mbuf, prepare the ovs bufs */
    > +    rte_mempool_obj_iter(dmp->mp, ovs_rte_pktmbuf_init, NULL);
    >  
    >      ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
    >      return dmp;
    > 
    
Hemant Agrawal March 23, 2017, 8:47 a.m. UTC | #3
On 3/22/2017 4:06 AM, Darrell Ball wrote:
> 
> On 3/21/17, 2:00 PM, "ovs-dev-bounces@openvswitch.org on behalf of Kevin
> Traynor" <ovs-dev-bounces@openvswitch.org on behalf of
> ktraynor@redhat.com> wrote:
> 
>     On 03/20/2017 11:38 AM, Hemant Agrawal wrote:
>     > DPDK 16.07 introduced the support for mempool offload support.
>     > rte_pktmbuf_pool_create is the recommended method for creating pktmbuf
>     > pools. Buffer pools created with rte_mempool_create may not get
> offloaded
>     > to the underlying offloaded mempools.
>     >
>     > This patch, changes the rte_mempool_create to use helper wrapper
>     > "rte_pktmbuf_pool_create" provided by dpdk, so that it can leverage
>     > offloaded mempools.
> 
>     This is changing OVS code to enable new functionality in some cases, I'm
>     not sure that it's really appropriate as a backport.
> 
> +1
> It is not a bug fix, but rather an enhancement and also cleaner api, so would not
> seem backport eligible.

 [Hemant] I can't disagree with the argument given.

The only argument I have is that rte_mempool_create will not work with DPDK PMDs that use offloaded packet pools. NXP happens to have one such PMD.
(But those are not part of the dpdk.org 16.07 stable tree.)
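
To illustrate the point about offloaded pools, a hedged sketch follows; the handler name "example_hw_pool" is made up, standing in for a PMD-registered mempool ops name. rte_mempool_create() always selects one of DPDK's software ring handlers internally, so its pools cannot be backed by a hardware buffer manager; an empty pool bound explicitly to a handler, or a pool from rte_pktmbuf_pool_create() using the configured default handler, can be.

#include <rte_mbuf.h>
#include <rte_mempool.h>

static struct rte_mempool *
example_create_offloaded_pool(unsigned n, unsigned elt_size, int socket_id)
{
    struct rte_mempool *mp;

    /* rte_mempool_create() would hard-wire a "ring_*" software handler
     * here.  Creating the pool empty lets us pick the handler before
     * populating it. */
    mp = rte_mempool_create_empty("example_pool", n, elt_size,
                                  0 /* cache */,
                                  sizeof(struct rte_pktmbuf_pool_private),
                                  socket_id, 0 /* flags */);
    if (!mp) {
        return NULL;
    }

    /* "example_hw_pool" is a placeholder for a PMD-provided handler name. */
    if (rte_mempool_set_ops_byname(mp, "example_hw_pool", NULL) < 0
        || rte_mempool_populate_default(mp) < 0) {
        rte_mempool_free(mp);
        return NULL;
    }
    return mp;
}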


> 
> 
>     >
>     > Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>     > ---
>     >  lib/netdev-dpdk.c | 20 +++++++++-----------
>     >  1 file changed, 9 insertions(+), 11 deletions(-)
>     >
>     > diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
>     > index 9203de0..8d658ff 100644
>     > --- a/lib/netdev-dpdk.c
>     > +++ b/lib/netdev-dpdk.c
>     > @@ -468,7 +468,6 @@ dpdk_mp_get(int socket_id, int mtu)
> OVS_REQUIRES(dpdk_mutex)
>     >      struct dpdk_mp *dmp = NULL;
>     >      char mp_name[RTE_MEMPOOL_NAMESIZE];
>     >      unsigned mp_size;
>     > -    struct rte_pktmbuf_pool_private mbp_priv;
>     >
>     >      LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
>     >          if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
>     > @@ -481,9 +480,6 @@ dpdk_mp_get(int socket_id, int mtu)
> OVS_REQUIRES(dpdk_mutex)
>     >      dmp->socket_id = socket_id;
>     >      dmp->mtu = mtu;
>     >      dmp->refcount = 1;
>     > -    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct
> dp_packet);
>     > -    mbp_priv.mbuf_priv_size = sizeof (struct dp_packet)
>     > -                              - sizeof (struct rte_mbuf);
>     >      /* XXX: this is a really rough method of provisioning memory.
>     >       * It's impossible to determine what the exact memory requirements are
> when
>     >       * the number of ports and rxqs that utilize a particular mempool can
> change
>     > @@ -500,13 +496,13 @@ dpdk_mp_get(int socket_id, int mtu)
> OVS_REQUIRES(dpdk_mutex)
>     >                       dmp->mtu, dmp->socket_id, mp_size) < 0) {
>     >              goto fail;
>     >          }
>     > -
>     > -        dmp->mp = rte_mempool_create(mp_name, mp_size,
> MBUF_SIZE(mtu),
>     > -                                     MP_CACHE_SZ,
>     > -                                     sizeof(struct rte_pktmbuf_pool_private),
>     > -                                     rte_pktmbuf_pool_init, &mbp_priv,
>     > -                                     ovs_rte_pktmbuf_init, NULL,
>     > -                                     socket_id, 0);
>     > +	dmp->mp = rte_pktmbuf_pool_create(mp_name, mp_size,
>     > +                                          MP_CACHE_SZ,
>     > +                                          sizeof (struct dp_packet)
>     > +                                                 - sizeof (struct rte_mbuf),
>     > +                                          MBUF_SIZE(mtu)
>     > +                                                 - sizeof(struct dp_packet),
>     > +                                          socket_id);
>     >      } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >=
> MIN_NB_MBUF);
>     >
>     >      if (dmp->mp == NULL) {
>     > @@ -514,6 +510,8 @@ dpdk_mp_get(int socket_id, int mtu)
> OVS_REQUIRES(dpdk_mutex)
>     >      } else {
>     >          VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name,
> mp_size );
>     >      }
>     > +    /* pktmbuf only prepare the rte_mbuf, prepare the ovs bufs */
>     > +    rte_mempool_obj_iter(dmp->mp, ovs_rte_pktmbuf_init, NULL);
>     >
>     >      ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
>     >      return dmp;
>     >
> 

Patch

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 9203de0..8d658ff 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -468,7 +468,6 @@  dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
     struct dpdk_mp *dmp = NULL;
     char mp_name[RTE_MEMPOOL_NAMESIZE];
     unsigned mp_size;
-    struct rte_pktmbuf_pool_private mbp_priv;
 
     LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
         if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
@@ -481,9 +480,6 @@  dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
     dmp->socket_id = socket_id;
     dmp->mtu = mtu;
     dmp->refcount = 1;
-    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
-    mbp_priv.mbuf_priv_size = sizeof (struct dp_packet)
-                              - sizeof (struct rte_mbuf);
     /* XXX: this is a really rough method of provisioning memory.
      * It's impossible to determine what the exact memory requirements are when
      * the number of ports and rxqs that utilize a particular mempool can change
@@ -500,13 +496,13 @@  dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
                      dmp->mtu, dmp->socket_id, mp_size) < 0) {
             goto fail;
         }
-
-        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
-                                     MP_CACHE_SZ,
-                                     sizeof(struct rte_pktmbuf_pool_private),
-                                     rte_pktmbuf_pool_init, &mbp_priv,
-                                     ovs_rte_pktmbuf_init, NULL,
-                                     socket_id, 0);
+	dmp->mp = rte_pktmbuf_pool_create(mp_name, mp_size,
+                                          MP_CACHE_SZ,
+                                          sizeof (struct dp_packet)
+                                                 - sizeof (struct rte_mbuf),
+                                          MBUF_SIZE(mtu)
+                                                 - sizeof(struct dp_packet),
+                                          socket_id);
     } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
 
     if (dmp->mp == NULL) {
@@ -514,6 +510,8 @@  dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
     } else {
         VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size );
     }
+    /* pktmbuf_pool_create only prepares the rte_mbuf; prepare the OVS bufs. */
+    rte_mempool_obj_iter(dmp->mp, ovs_rte_pktmbuf_init, NULL);
 
     ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
     return dmp;
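
As a closing note, a hedged sketch of why ovs_rte_pktmbuf_init() can simply be handed to rte_mempool_obj_iter(): the obj_init callback that the old rte_mempool_create() call took and the callback rte_mempool_obj_iter() expects share the rte_mempool_obj_cb_t signature. The body below is a simplified stand-in, not the real OVS function, which also initializes the struct dp_packet embedded in each mbuf.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Simplified stand-in for ovs_rte_pktmbuf_init(); the signature matches
 * rte_mempool_obj_cb_t. */
static void
example_ovs_pktmbuf_init(struct rte_mempool *mp, void *opaque,
                         void *obj, unsigned obj_idx)
{
    (void) opaque;
    (void) obj_idx;

    /* DPDK per-mbuf init: already done by rte_pktmbuf_pool_create(), and
     * harmless to repeat while the mbufs are still sitting in the pool. */
    rte_pktmbuf_init(mp, NULL, obj, 0);

    /* The OVS-specific part of each buffer would be initialized here. */
}

static void
example_finish_pool(struct rte_mempool *mp)
{
    /* Walk every object in the pool once, as the patch does after
     * rte_pktmbuf_pool_create() succeeds. */
    rte_mempool_obj_iter(mp, example_ovs_pktmbuf_init, NULL);
}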