Message ID | 1500480297-7530-1-git-send-email-antonio.fischetti@intel.com |
---|---|
State | Superseded |
Delegated to: | Darrell Ball |
Headers | show |
Hi Antionio, This looks like a reasonable change to me. Can you add some performance statistics for when dealing with re-circulated packets? /Billy. > -----Original Message----- > From: ovs-dev-bounces@openvswitch.org [mailto:ovs-dev- > bounces@openvswitch.org] On Behalf Of antonio.fischetti@intel.com > Sent: Wednesday, July 19, 2017 5:05 PM > To: dev@openvswitch.org > Subject: [ovs-dev] [PATCH v2 1/5] dpif-netdev: move pkt metadata init out > of emc_processing. > > Packet metadata initialization is moved into dp_netdev_input to improve > performance. > > Signed-off-by: Antonio Fischetti <antonio.fischetti@intel.com> > --- > In my testbench with the following port to port flow setup: > in_port=1,action=output:2 > in_port=2,action=output:1 > > I measured packet Rx rate (regardless of packet loss) in a Bidirectional test > with 64B UDP packets. > > I saw the following performance improvement > > Orig: 11.30, 11.54 Mpps > Orig + patch: 11.70, 11.76 Mpps > > lib/dpif-netdev.c | 21 ++++++++++----------- > 1 file changed, 10 insertions(+), 11 deletions(-) > > diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 98e7765..123e04a > 100644 > --- a/lib/dpif-netdev.c > +++ b/lib/dpif-netdev.c > @@ -4572,8 +4572,7 @@ static inline size_t emc_processing(struct > dp_netdev_pmd_thread *pmd, > struct dp_packet_batch *packets_, > struct netdev_flow_key *keys, > - struct packet_batch_per_flow batches[], size_t *n_batches, > - bool md_is_valid, odp_port_t port_no) > + struct packet_batch_per_flow batches[], size_t > + *n_batches) > { > struct emc_cache *flow_cache = &pmd->flow_cache; > struct netdev_flow_key *key = &keys[0]; @@ -4601,9 +4600,6 @@ > emc_processing(struct dp_netdev_pmd_thread *pmd, > pkt_metadata_prefetch_init(&packets[i+1]->md); > } > > - if (!md_is_valid) { > - pkt_metadata_init(&packet->md, port_no); > - } > miniflow_extract(packet, &key->mf); > key->len = 0; /* Not computed yet. 
*/ > key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf); > @@ -4805,8 +4801,7 @@ fast_path_processing(struct > dp_netdev_pmd_thread *pmd, > * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */ static > void dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, > - struct dp_packet_batch *packets, > - bool md_is_valid, odp_port_t port_no) > + struct dp_packet_batch *packets) > { > int cnt = packets->count; > #if !defined(__CHECKER__) && !defined(_WIN32) @@ -4823,8 +4818,7 @@ > dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, > odp_port_t in_port; > > n_batches = 0; > - emc_processing(pmd, packets, keys, batches, &n_batches, > - md_is_valid, port_no); > + emc_processing(pmd, packets, keys, batches, &n_batches); > if (!dp_packet_batch_is_empty(packets)) { > /* Get ingress port from first packet's metadata. */ > in_port = packets->packets[0]->md.in_port.odp_port; > @@ -4856,14 +4850,19 @@ dp_netdev_input(struct > dp_netdev_pmd_thread *pmd, > struct dp_packet_batch *packets, > odp_port_t port_no) > { > - dp_netdev_input__(pmd, packets, false, port_no); > + struct dp_packet *packet; > + DP_PACKET_BATCH_FOR_EACH (packet, packets) { > + pkt_metadata_init(&packet->md, port_no); > + } > + > + dp_netdev_input__(pmd, packets); > } > > static void > dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd, > struct dp_packet_batch *packets) { > - dp_netdev_input__(pmd, packets, true, 0); > + dp_netdev_input__(pmd, packets); > } > > struct dp_netdev_execute_aux { > -- > 2.4.11 > > _______________________________________________ > dev mailing list > dev@openvswitch.org > https://mail.openvswitch.org/mailman/listinfo/ovs-dev
There is also a reference to md_is_valid in the comments of emc_processing that needs to be removed. > -----Original Message----- > From: O Mahony, Billy > Sent: Monday, July 31, 2017 4:04 PM > To: 'antonio.fischetti@intel.com' <antonio.fischetti@intel.com>; > dev@openvswitch.org > Subject: RE: [ovs-dev] [PATCH v2 1/5] dpif-netdev: move pkt metadata init > out of emc_processing. > > Hi Antionio, > > This looks like a reasonable change to me. > > Can you add some performance statistics for when dealing with re-circulated > packets? > > /Billy. > > > -----Original Message----- > > From: ovs-dev-bounces@openvswitch.org [mailto:ovs-dev- > > bounces@openvswitch.org] On Behalf Of antonio.fischetti@intel.com > > Sent: Wednesday, July 19, 2017 5:05 PM > > To: dev@openvswitch.org > > Subject: [ovs-dev] [PATCH v2 1/5] dpif-netdev: move pkt metadata init > > out of emc_processing. > > > > Packet metadata initialization is moved into dp_netdev_input to > > improve performance. > > > > Signed-off-by: Antonio Fischetti <antonio.fischetti@intel.com> > > --- > > In my testbench with the following port to port flow setup: > > in_port=1,action=output:2 > > in_port=2,action=output:1 > > > > I measured packet Rx rate (regardless of packet loss) in a > > Bidirectional test with 64B UDP packets. 
> > > > I saw the following performance improvement > > > > Orig: 11.30, 11.54 Mpps > > Orig + patch: 11.70, 11.76 Mpps > > > > lib/dpif-netdev.c | 21 ++++++++++----------- > > 1 file changed, 10 insertions(+), 11 deletions(-) > > > > diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index > > 98e7765..123e04a > > 100644 > > --- a/lib/dpif-netdev.c > > +++ b/lib/dpif-netdev.c > > @@ -4572,8 +4572,7 @@ static inline size_t emc_processing(struct > > dp_netdev_pmd_thread *pmd, > > struct dp_packet_batch *packets_, > > struct netdev_flow_key *keys, > > - struct packet_batch_per_flow batches[], size_t *n_batches, > > - bool md_is_valid, odp_port_t port_no) > > + struct packet_batch_per_flow batches[], size_t > > + *n_batches) > > { > > struct emc_cache *flow_cache = &pmd->flow_cache; > > struct netdev_flow_key *key = &keys[0]; @@ -4601,9 +4600,6 @@ > > emc_processing(struct dp_netdev_pmd_thread *pmd, > > pkt_metadata_prefetch_init(&packets[i+1]->md); > > } > > > > - if (!md_is_valid) { > > - pkt_metadata_init(&packet->md, port_no); > > - } > > miniflow_extract(packet, &key->mf); > > key->len = 0; /* Not computed yet. */ > > key->hash = dpif_netdev_packet_get_rss_hash(packet, > > &key->mf); @@ -4805,8 +4801,7 @@ fast_path_processing(struct > > dp_netdev_pmd_thread *pmd, > > * valid, 'md_is_valid' must be true and 'port_no' will be ignored. 
> > */ static void dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, > > - struct dp_packet_batch *packets, > > - bool md_is_valid, odp_port_t port_no) > > + struct dp_packet_batch *packets) > > { > > int cnt = packets->count; > > #if !defined(__CHECKER__) && !defined(_WIN32) @@ -4823,8 +4818,7 > @@ > > dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, > > odp_port_t in_port; > > > > n_batches = 0; > > - emc_processing(pmd, packets, keys, batches, &n_batches, > > - md_is_valid, port_no); > > + emc_processing(pmd, packets, keys, batches, &n_batches); > > if (!dp_packet_batch_is_empty(packets)) { > > /* Get ingress port from first packet's metadata. */ > > in_port = packets->packets[0]->md.in_port.odp_port; > > @@ -4856,14 +4850,19 @@ dp_netdev_input(struct > dp_netdev_pmd_thread > > *pmd, > > struct dp_packet_batch *packets, > > odp_port_t port_no) > > { > > - dp_netdev_input__(pmd, packets, false, port_no); > > + struct dp_packet *packet; > > + DP_PACKET_BATCH_FOR_EACH (packet, packets) { > > + pkt_metadata_init(&packet->md, port_no); > > + } > > + > > + dp_netdev_input__(pmd, packets); > > } > > > > static void > > dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd, > > struct dp_packet_batch *packets) { > > - dp_netdev_input__(pmd, packets, true, 0); > > + dp_netdev_input__(pmd, packets); > > } > > > > struct dp_netdev_execute_aux { > > -- > > 2.4.11 > > > > _______________________________________________ > > dev mailing list > > dev@openvswitch.org > > https://mail.openvswitch.org/mailman/listinfo/ovs-dev
> -----Original Message----- > From: O Mahony, Billy > Sent: Monday, July 31, 2017 4:33 PM > To: Fischetti, Antonio <antonio.fischetti@intel.com>; dev@openvswitch.org > Subject: RE: [ovs-dev] [PATCH v2 1/5] dpif-netdev: move pkt metadata init out > of emc_processing. > > There is also a reference to md_is_valid is the comments of emc_processing that > needs to be removed. [Antonio] thanks will fix. > > > -----Original Message----- > > From: O Mahony, Billy > > Sent: Monday, July 31, 2017 4:04 PM > > To: 'antonio.fischetti@intel.com' <antonio.fischetti@intel.com>; > > dev@openvswitch.org > > Subject: RE: [ovs-dev] [PATCH v2 1/5] dpif-netdev: move pkt metadata init > > out of emc_processing. > > > > Hi Antionio, > > > > This looks like a reasonable change to me. > > > > Can you add some performance statistics for when dealing with re-circulated > > packets? [Antonio] I used the ConnectionTracker setup for a firewall like table=0, priority=100,ct_state=-trk,ip actions=ct(table=1) table=0, priority=10,arp actions=NORMAL table=0, priority=1 actions=drop table=1, ct_state=+new+trk,ip,in_port=dpdk0 actions=ct(commit),output:dpdk1 table=1, ct_state=+new+trk,ip,in_port=dpdk1 actions=drop table=1, ct_state=+est+trk,ip,in_port=dpdk0 actions=output:dpdk1 table=1, ct_state=+est+trk,ip,in_port=dpdk1 actions=output:dpdk0 I was sending 10 different UDP streams from the traffic generator, 2 PMDs with 3 Tx queues, 64 B packets at the line rate. I got the following performance figures when I measured the received pkt rate [Mpps] on the 2 sides, regardless of packet loss. Original OvS-DPDK 2.58 2.60 + this patch 2.67 2.71 I didn't test with more UDP streams because - as the performance drops - the relative improvement becomes so small that is less visible and measurable. > > > > /Billy. 
> > > > > -----Original Message----- > > > From: ovs-dev-bounces@openvswitch.org [mailto:ovs-dev- > > > bounces@openvswitch.org] On Behalf Of antonio.fischetti@intel.com > > > Sent: Wednesday, July 19, 2017 5:05 PM > > > To: dev@openvswitch.org > > > Subject: [ovs-dev] [PATCH v2 1/5] dpif-netdev: move pkt metadata init > > > out of emc_processing. > > > > > > Packet metadata initialization is moved into dp_netdev_input to > > > improve performance. > > > > > > Signed-off-by: Antonio Fischetti <antonio.fischetti@intel.com> > > > --- > > > In my testbench with the following port to port flow setup: > > > in_port=1,action=output:2 > > > in_port=2,action=output:1 > > > > > > I measured packet Rx rate (regardless of packet loss) in a > > > Bidirectional test with 64B UDP packets. > > > > > > I saw the following performance improvement > > > > > > Orig: 11.30, 11.54 Mpps > > > Orig + patch: 11.70, 11.76 Mpps > > > > > > lib/dpif-netdev.c | 21 ++++++++++----------- > > > 1 file changed, 10 insertions(+), 11 deletions(-) > > > > > > diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index > > > 98e7765..123e04a > > > 100644 > > > --- a/lib/dpif-netdev.c > > > +++ b/lib/dpif-netdev.c > > > @@ -4572,8 +4572,7 @@ static inline size_t emc_processing(struct > > > dp_netdev_pmd_thread *pmd, > > > struct dp_packet_batch *packets_, > > > struct netdev_flow_key *keys, > > > - struct packet_batch_per_flow batches[], size_t *n_batches, > > > - bool md_is_valid, odp_port_t port_no) > > > + struct packet_batch_per_flow batches[], size_t > > > + *n_batches) > > > { > > > struct emc_cache *flow_cache = &pmd->flow_cache; > > > struct netdev_flow_key *key = &keys[0]; @@ -4601,9 +4600,6 @@ > > > emc_processing(struct dp_netdev_pmd_thread *pmd, > > > pkt_metadata_prefetch_init(&packets[i+1]->md); > > > } > > > > > > - if (!md_is_valid) { > > > - pkt_metadata_init(&packet->md, port_no); > > > - } > > > miniflow_extract(packet, &key->mf); > > > key->len = 0; /* Not computed yet. 
*/ > > > key->hash = dpif_netdev_packet_get_rss_hash(packet, > > > &key->mf); @@ -4805,8 +4801,7 @@ fast_path_processing(struct > > > dp_netdev_pmd_thread *pmd, > > > * valid, 'md_is_valid' must be true and 'port_no' will be ignored. > > > */ static void dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, > > > - struct dp_packet_batch *packets, > > > - bool md_is_valid, odp_port_t port_no) > > > + struct dp_packet_batch *packets) > > > { > > > int cnt = packets->count; > > > #if !defined(__CHECKER__) && !defined(_WIN32) @@ -4823,8 +4818,7 > > @@ > > > dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, > > > odp_port_t in_port; > > > > > > n_batches = 0; > > > - emc_processing(pmd, packets, keys, batches, &n_batches, > > > - md_is_valid, port_no); > > > + emc_processing(pmd, packets, keys, batches, &n_batches); > > > if (!dp_packet_batch_is_empty(packets)) { > > > /* Get ingress port from first packet's metadata. */ > > > in_port = packets->packets[0]->md.in_port.odp_port; > > > @@ -4856,14 +4850,19 @@ dp_netdev_input(struct > > dp_netdev_pmd_thread > > > *pmd, > > > struct dp_packet_batch *packets, > > > odp_port_t port_no) > > > { > > > - dp_netdev_input__(pmd, packets, false, port_no); > > > + struct dp_packet *packet; > > > + DP_PACKET_BATCH_FOR_EACH (packet, packets) { > > > + pkt_metadata_init(&packet->md, port_no); > > > + } > > > + > > > + dp_netdev_input__(pmd, packets); > > > } > > > > > > static void > > > dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd, > > > struct dp_packet_batch *packets) { > > > - dp_netdev_input__(pmd, packets, true, 0); > > > + dp_netdev_input__(pmd, packets); > > > } > > > > > > struct dp_netdev_execute_aux { > > > -- > > > 2.4.11 > > > > > > _______________________________________________ > > > dev mailing list > > > dev@openvswitch.org > > > https://mail.openvswitch.org/mailman/listinfo/ovs-dev
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 98e7765..123e04a 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -4572,8 +4572,7 @@ static inline size_t emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets_, struct netdev_flow_key *keys, - struct packet_batch_per_flow batches[], size_t *n_batches, - bool md_is_valid, odp_port_t port_no) + struct packet_batch_per_flow batches[], size_t *n_batches) { struct emc_cache *flow_cache = &pmd->flow_cache; struct netdev_flow_key *key = &keys[0]; @@ -4601,9 +4600,6 @@ emc_processing(struct dp_netdev_pmd_thread *pmd, pkt_metadata_prefetch_init(&packets[i+1]->md); } - if (!md_is_valid) { - pkt_metadata_init(&packet->md, port_no); - } miniflow_extract(packet, &key->mf); key->len = 0; /* Not computed yet. */ key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf); @@ -4805,8 +4801,7 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd, * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */ static void dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, - struct dp_packet_batch *packets, - bool md_is_valid, odp_port_t port_no) + struct dp_packet_batch *packets) { int cnt = packets->count; #if !defined(__CHECKER__) && !defined(_WIN32) @@ -4823,8 +4818,7 @@ dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, odp_port_t in_port; n_batches = 0; - emc_processing(pmd, packets, keys, batches, &n_batches, - md_is_valid, port_no); + emc_processing(pmd, packets, keys, batches, &n_batches); if (!dp_packet_batch_is_empty(packets)) { /* Get ingress port from first packet's metadata. 
*/ in_port = packets->packets[0]->md.in_port.odp_port; @@ -4856,14 +4850,19 @@ dp_netdev_input(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets, odp_port_t port_no) { - dp_netdev_input__(pmd, packets, false, port_no); + struct dp_packet *packet; + DP_PACKET_BATCH_FOR_EACH (packet, packets) { + pkt_metadata_init(&packet->md, port_no); + } + + dp_netdev_input__(pmd, packets); } static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets) { - dp_netdev_input__(pmd, packets, true, 0); + dp_netdev_input__(pmd, packets); } struct dp_netdev_execute_aux {
Packet metadata initialization is moved into dp_netdev_input to improve performance. Signed-off-by: Antonio Fischetti <antonio.fischetti@intel.com> --- In my testbench with the following port to port flow setup: in_port=1,action=output:2 in_port=2,action=output:1 I measured packet Rx rate (regardless of packet loss) in a Bidirectional test with 64B UDP packets. I saw the following performance improvement Orig: 11.30, 11.54 Mpps Orig + patch: 11.70, 11.76 Mpps lib/dpif-netdev.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-)