
[ovs-dev,RFC,v3,1/8] netdev-dpdk: simplify mbuf sizing

Message ID 1511288957-68599-2-git-send-email-mark.b.kavanagh@intel.com
State Superseded
Delegated to: Ian Stokes
Series netdev-dpdk: support multi-segment mbufs

Commit Message

Mark Kavanagh Nov. 21, 2017, 6:29 p.m. UTC
When calculating the mbuf data_room_size (i.e. the size of actual
packet data that an mbuf can accommodate), it is possible to simply
use the value calculated by dpdk_buf_size() as a parameter to
rte_pktmbuf_pool_create(). This simplifies mbuf sizing considerably.
This patch removes the related size conversions and macros, which
are no longer needed.

The benefits of this approach are threefold:
- the mbuf sizing code is much simpler, and more readable.
- mbuf size will always be cache-aligned [1], satisfying the alignment
  requirement of certain PMDs (the thunderx vNIC, for example).
- the maximum amount of data that each mbuf contains may now be
  calculated as mbuf->buf_len - mbuf->data_off. This is important
  in the case of multi-segment jumbo frames.

[1] This holds because the mbuf size is now always a multiple of 1024B,
     plus the 128B RTE_PKTMBUF_HEADROOM and the 704B struct dp_packet;
     see the worked example below.
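
To make the sizing arithmetic concrete, here is a worked example for MTU
1500. This is an editor's sketch, not part of the patch: the constants come
from the figures quoted above (128B RTE_PKTMBUF_HEADROOM, 704B struct
dp_packet, 1024B NETDEV_DPDK_MBUF_ALIGN), and ETHER_HDR_MAX_LEN is assumed
to work out to 26B (14B Ethernet header + 4B CRC + two 4B VLAN tags, per
the macros in the diff).

    #include <stdio.h>
    #include <stdint.h>

    #define ROUND_UP(x, y)  ((((x) + (y) - 1) / (y)) * (y))
    #define MBUF_ALIGN      1024  /* NETDEV_DPDK_MBUF_ALIGN */
    #define HDR_MAX_LEN     26    /* assumed ETHER_HDR_MAX_LEN */
    #define HEADROOM        128   /* RTE_PKTMBUF_HEADROOM */
    #define DP_PACKET_SIZE  704   /* quoted sizeof(struct dp_packet) */

    int main(void)
    {
        int mtu = 1500;
        /* dpdk_buf_size(): max frame plus headroom, rounded up to 1KB. */
        uint32_t frame_len = ROUND_UP(mtu + HDR_MAX_LEN + HEADROOM,
                                      MBUF_ALIGN);
        /* Full per-mbuf element: data room + headroom + packet metadata. */
        uint32_t elt = frame_len + HEADROOM + DP_PACKET_SIZE;

        printf("frame_len = %u\n", frame_len);                /* 2048 */
        printf("element = %u, mod 64 = %u\n", elt, elt % 64); /* 2880, 0 */
        return 0;
    }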

Fixes: 4be4d22 ("netdev-dpdk: clean up mbuf initialization")
Fixes: 31b88c9 ("netdev-dpdk: round up mbuf_size to cache_line_size")
Signed-off-by: Mark Kavanagh <mark.b.kavanagh@intel.com>
---
 lib/netdev-dpdk.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)

Comments

Mark Kavanagh Nov. 22, 2017, 1:24 p.m. UTC | #1
+ Santosh

>From: ovs-dev-bounces@openvswitch.org [mailto:ovs-dev-bounces@openvswitch.org]
>On Behalf Of Mark Kavanagh
>Sent: Tuesday, November 21, 2017 6:29 PM
>To: dev@openvswitch.org; qiudayu@chinac.com
>Subject: [ovs-dev] [RFC PATCH v3 1/8] netdev-dpdk: simplify mbuf sizing
>
>When calculating the mbuf data_room_size (i.e. the size of actual
>packet data that an mbuf can accommodate), it is possible to simply
>use the value calculated by dpdk_buf_size() as a parameter to
>rte_pktmbuf_pool_create(). This simplifies mbuf sizing considerably.
>This patch removes the related size conversions and macros, which
>are no longer needed.
>
>The benefits of this approach are threefold:
>- the mbuf sizing code is much simpler, and more readable.
>- mbuf size will always be cache-aligned [1], satisfying the alignment
>  requirement of certain PMDs (the thunderx vNIC, for example).
>- the maximum amount of data that each mbuf contains may now be
>  calculated as mbuf->buf_len - mbuf->data_off. This is important
>  in the case of multi-segment jumbo frames.
>
>[1] This holds because the mbuf size is now always a multiple of 1024B,
>     plus the 128B RTE_PKTMBUF_HEADROOM and the 704B struct dp_packet.

Santosh, I've just spotted that the cache line size for thunderx is defined as 128B, as opposed to 64B.
	==> config/defconfig_arm64-thunderx-linuxapp-gcc:36:CONFIG_RTE_CACHE_LINE_SIZE=128

Apologies for the oversight - I'd be very interested to hear your thoughts on this patch.

Thanks in advance,
Mark
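
To make the concern concrete, a compile-time check (a sketch reusing the
2880B per-mbuf element size from the worked example in the commit message)
shows that the alignment claim holds for 64B cache lines but not for 128B
lines:

    /* 2880 % 64 == 0, but 2880 % 128 == 64: the element size is
     * aligned for 64B cache lines only. */
    _Static_assert(2880 % 64 == 0, "OK for 64B cache lines");
    /* _Static_assert(2880 % 128 == 0, "128B"); would fail to compile. */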


santosh June 12, 2018, 3:56 a.m. UTC | #2
On Wednesday 22 November 2017 06:54 PM, Kavanagh, Mark B wrote:
> + Santosh
>
>> From: ovs-dev-bounces@openvswitch.org [mailto:ovs-dev-bounces@openvswitch.org]
>> On Behalf Of Mark Kavanagh
>> Sent: Tuesday, November 21, 2017 6:29 PM
>> To: dev@openvswitch.org; qiudayu@chinac.com
>> Subject: [ovs-dev] [RFC PATCH v3 1/8] netdev-dpdk: simplify mbuf sizing
>>
>> When calculating the mbuf data_room_size (i.e. the size of actual
>> packet data that an mbuf can accommodate), it is possible to simply
>> use the value calculated by dpdk_buf_size() as a parameter to
>> rte_pktmbuf_pool_create(). This simplifies mbuf sizing considerably.
>> This patch removes the related size conversions and macros, which
>> are no longer needed.
>>
>> The benefits of this approach are threefold:
>> - the mbuf sizing code is much simpler, and more readable.
>> - mbuf size will always be cache-aligned [1], satisfying the alignment
>>  requirement of certain PMDs (the thunderx vNIC, for example).
>> - the maximum amount of data that each mbuf contains may now be
>>  calculated as mbuf->buf_len - mbuf->data_off. This is important
>>  in the case of multi-segment jumbo frames.
>>
>> [1] This holds because the mbuf size is now always a multiple of 1024B,
>>     plus the 128B RTE_PKTMBUF_HEADROOM and the 704B struct dp_packet.
> Santosh, I've just spotted that the cache line size for thunderx is defined as 128B, as opposed to 64B.
> 	==> config/defconfig_arm64-thunderx-linuxapp-gcc:36:CONFIG_RTE_CACHE_LINE_SIZE=128
>
> Apologies for the oversight - I'd be very interested to hear your thoughts on this patch.
>
> Thanks in advance,
> Mark
>
Sorry for the late reply.
I'm reviewing the changeset now and will share my feedback on the series:
[PATCH v8 01/13] netdev-dpdk: fix mbuf sizing.

Thanks.

Patch

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 8906423..c5eb851 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -81,12 +81,6 @@  static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
                                      + (2 * VLAN_HEADER_LEN))
 #define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
 #define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
-#define FRAME_LEN_TO_MTU(frame_len) ((frame_len)                    \
-                                     - ETHER_HDR_LEN - ETHER_CRC_LEN)
-#define MBUF_SIZE(mtu)              ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) \
-                                             + sizeof(struct dp_packet) \
-                                             + RTE_PKTMBUF_HEADROOM),   \
-                                             RTE_CACHE_LINE_SIZE)
 #define NETDEV_DPDK_MBUF_ALIGN      1024
 #define NETDEV_DPDK_MAX_PKT_LEN     9728
 
@@ -447,7 +441,7 @@  is_dpdk_class(const struct netdev_class *class)
  * behaviour, which reduces performance. To prevent this, use a buffer size
  * that is closest to 'mtu', but which satisfies the aforementioned criteria.
  */
-static uint32_t
+static uint16_t
 dpdk_buf_size(int mtu)
 {
     return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
@@ -486,7 +480,7 @@  ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED,
  *  - a new mempool was just created;
  *  - a matching mempool already exists. */
 static struct rte_mempool *
-dpdk_mp_create(struct netdev_dpdk *dev, int mtu)
+dpdk_mp_create(struct netdev_dpdk *dev, uint16_t frame_len)
 {
     char mp_name[RTE_MEMPOOL_NAMESIZE];
     const char *netdev_name = netdev_get_name(&dev->up);
@@ -513,12 +507,12 @@  dpdk_mp_create(struct netdev_dpdk *dev, int mtu)
          * longer than RTE_MEMPOOL_NAMESIZE. */
         int ret = snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                            "ovs%08x%02d%05d%07u",
-                           hash, socket_id, mtu, n_mbufs);
+                           hash, socket_id, frame_len, n_mbufs);
         if (ret < 0 || ret >= RTE_MEMPOOL_NAMESIZE) {
             VLOG_DBG("snprintf returned %d. "
                      "Failed to generate a mempool name for \"%s\". "
-                     "Hash:0x%x, socket_id: %d, mtu:%d, mbufs:%u.",
-                     ret, netdev_name, hash, socket_id, mtu, n_mbufs);
+                     "Hash:0x%x, socket_id: %d, frame length:%d, mbufs:%u.",
+                     ret, netdev_name, hash, socket_id, frame_len, n_mbufs);
             break;
         }
 
@@ -529,7 +523,7 @@  dpdk_mp_create(struct netdev_dpdk *dev, int mtu)
 
         mp = rte_pktmbuf_pool_create(mp_name, n_mbufs, MP_CACHE_SZ,
                  sizeof (struct dp_packet) - sizeof (struct rte_mbuf),
-                 MBUF_SIZE(mtu) - sizeof(struct dp_packet), socket_id);
+                 frame_len + RTE_PKTMBUF_HEADROOM, socket_id);
 
         if (mp) {
             VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
@@ -582,11 +576,11 @@  static int
 netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
     OVS_REQUIRES(dev->mutex)
 {
-    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
+    uint16_t buf_size = dpdk_buf_size(dev->requested_mtu);
     struct rte_mempool *mp;
     int ret = 0;
 
-    mp = dpdk_mp_create(dev, FRAME_LEN_TO_MTU(buf_size));
+    mp = dpdk_mp_create(dev, buf_size);
     if (!mp) {
         VLOG_ERR("Failed to create memory pool for netdev "
                  "%s, with MTU %d on socket %d: %s\n",