Message ID | 20210210152702.4898-7-elibr@nvidia.com
---|---
State | Changes Requested
Series | Netdev vxlan-decap offload
On Wed, Feb 10, 2021 at 8:57 PM Eli Britstein <elibr@nvidia.com> wrote:
>
> Recover the packet if it was partially processed by the HW. Fallback to
> lookup flow by mark association.
>
> Signed-off-by: Eli Britstein <elibr@nvidia.com>
> Reviewed-by: Gaetan Rivet <gaetanr@nvidia.com>
> ---
>  lib/dpif-netdev.c | 46 ++++++++++++++++++++++++++++++----------------
>  1 file changed, 30 insertions(+), 16 deletions(-)
>
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index e3fd0a07f..09e86631e 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -7036,6 +7036,10 @@ smc_lookup_batch(struct dp_netdev_pmd_thread *pmd,
>      pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SMC_HIT, n_smc_hit);
>  }
>
> +static struct tx_port *
> +pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
> +                           odp_port_t port_no);
> +
>  /* Try to process all ('cnt') the 'packets' using only the datapath flow cache
>   * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
>   * miniflow is copied into 'keys' and the packet pointer is moved at the
> @@ -7099,23 +7103,33 @@ dfc_processing(struct dp_netdev_pmd_thread *pmd,
>              pkt_metadata_init(&packet->md, port_no);
>          }
>
> -        if ((*recirc_depth_get() == 0) &&
> -            dp_packet_has_flow_mark(packet, &mark)) {
> -            flow = mark_to_flow_find(pmd, mark);
> -            if (OVS_LIKELY(flow)) {
> -                tcp_flags = parse_tcp_flags(packet);
> -                if (OVS_LIKELY(batch_enable)) {
> -                    dp_netdev_queue_batches(packet, flow, tcp_flags, batches,
> -                                            n_batches);
> -                } else {
> -                    /* Flow batching should be performed only after fast-path
> -                     * processing is also completed for packets with emc miss
> -                     * or else it will result in reordering of packets with
> -                     * same datapath flows. */
> -                    packet_enqueue_to_flow_map(packet, flow, tcp_flags,
> -                                               flow_map, map_cnt++);
> +        if (*recirc_depth_get() == 0) {
> +            /* Restore the packet if HW processing was terminated before
> +             * completion.
> +             */
> +            struct tx_port *p;
> +
> +            tcp_flags = parse_tcp_flags(packet);
> +            p = pmd_send_port_cache_lookup(pmd, port_no);
> +            if (!p || netdev_hw_miss_packet_recover(p->port->netdev, packet)) {
> +                if (dp_packet_has_flow_mark(packet, &mark)) {
> +                    flow = mark_to_flow_find(pmd, mark);
> +                    if (OVS_LIKELY(flow)) {
> +                        if (OVS_LIKELY(batch_enable)) {
> +                            dp_netdev_queue_batches(packet, flow, tcp_flags,
> +                                                    batches, n_batches);
> +                        } else {
> +                            /* Flow batching should be performed only after
> +                             * fast-path processing is also completed for
> +                             * packets with emc miss or else it will result in
> +                             * reordering of packets with same datapath flows.
> +                             */
> +                            packet_enqueue_to_flow_map(packet, flow, tcp_flags,
> +                                                       flow_map, map_cnt++);
> +                        }
> +                        continue;
> +                    }
> +                }
>                  }
> -                continue;
>              }
>          }

The above logic should be changed to avoid hw_miss_packet_recover()
when hw-offload is not enabled:

if (*recirc_depth_get() == 0) {
    ...
    ...
    if (netdev_is_flow_api_enabled()) {
        p = pmd_send_port_cache_lookup(pmd, port_no);
        if (OVS_UNLIKELY(p && !netdev_hw_miss_packet_recover(p->port->netdev,
                                                             packet))) {
            goto miniflow;
        }
    }
    if (dp_packet_has_flow_mark(packet, &mark)) {
        flow = mark_to_flow_find(pmd, mark);
        if (OVS_LIKELY(flow)) {
            ...
        }
    }
}

miniflow:
    miniflow_extract();

>
> --
> 2.28.0.546.g385c171
>
On 2/24/2021 7:36 AM, Sriharsha Basavapatna wrote:
> On Wed, Feb 10, 2021 at 8:57 PM Eli Britstein <elibr@nvidia.com> wrote:
>> Recover the packet if it was partially processed by the HW. Fallback to
>> lookup flow by mark association.
>>
>> Signed-off-by: Eli Britstein <elibr@nvidia.com>
>> Reviewed-by: Gaetan Rivet <gaetanr@nvidia.com>
>> ---
>>  lib/dpif-netdev.c | 46 ++++++++++++++++++++++++++++++----------------
>>  1 file changed, 30 insertions(+), 16 deletions(-)
>>
>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
>> index e3fd0a07f..09e86631e 100644
>> --- a/lib/dpif-netdev.c
>> +++ b/lib/dpif-netdev.c
>> @@ -7036,6 +7036,10 @@ smc_lookup_batch(struct dp_netdev_pmd_thread *pmd,
>>      pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SMC_HIT, n_smc_hit);
>>  }
>>
>> +static struct tx_port *
>> +pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
>> +                           odp_port_t port_no);
>> +
>>  /* Try to process all ('cnt') the 'packets' using only the datapath flow cache
>>   * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
>>   * miniflow is copied into 'keys' and the packet pointer is moved at the
>> @@ -7099,23 +7103,33 @@ dfc_processing(struct dp_netdev_pmd_thread *pmd,
>>              pkt_metadata_init(&packet->md, port_no);
>>          }
>>
>> -        if ((*recirc_depth_get() == 0) &&
>> -            dp_packet_has_flow_mark(packet, &mark)) {
>> -            flow = mark_to_flow_find(pmd, mark);
>> -            if (OVS_LIKELY(flow)) {
>> -                tcp_flags = parse_tcp_flags(packet);
>> -                if (OVS_LIKELY(batch_enable)) {
>> -                    dp_netdev_queue_batches(packet, flow, tcp_flags, batches,
>> -                                            n_batches);
>> -                } else {
>> -                    /* Flow batching should be performed only after fast-path
>> -                     * processing is also completed for packets with emc miss
>> -                     * or else it will result in reordering of packets with
>> -                     * same datapath flows. */
>> -                    packet_enqueue_to_flow_map(packet, flow, tcp_flags,
>> -                                               flow_map, map_cnt++);
>> +        if (*recirc_depth_get() == 0) {
>> +            /* Restore the packet if HW processing was terminated before
>> +             * completion.
>> +             */
>> +            struct tx_port *p;
>> +
>> +            tcp_flags = parse_tcp_flags(packet);
>> +            p = pmd_send_port_cache_lookup(pmd, port_no);
>> +            if (!p || netdev_hw_miss_packet_recover(p->port->netdev, packet)) {
>> +                if (dp_packet_has_flow_mark(packet, &mark)) {
>> +                    flow = mark_to_flow_find(pmd, mark);
>> +                    if (OVS_LIKELY(flow)) {
>> +                        if (OVS_LIKELY(batch_enable)) {
>> +                            dp_netdev_queue_batches(packet, flow, tcp_flags,
>> +                                                    batches, n_batches);
>> +                        } else {
>> +                            /* Flow batching should be performed only after
>> +                             * fast-path processing is also completed for
>> +                             * packets with emc miss or else it will result in
>> +                             * reordering of packets with same datapath flows.
>> +                             */
>> +                            packet_enqueue_to_flow_map(packet, flow, tcp_flags,
>> +                                                       flow_map, map_cnt++);
>> +                        }
>> +                        continue;
>> +                    }
>> +                }
>>                  }
>> -                continue;
>>              }
>>          }
> The above logic should be changed to avoid hw_miss_packet_recover()
> when hw-offload is not enabled:
>
> if (*recirc_depth_get() == 0) {
>     ...
>     ...
>     if (netdev_is_flow_api_enabled()) {
>         p = pmd_send_port_cache_lookup(pmd, port_no);
>         if (OVS_UNLIKELY(p && !netdev_hw_miss_packet_recover(p->port->netdev,
>                                                              packet))) {
>             goto miniflow;
>         }
>     }
>     if (dp_packet_has_flow_mark(packet, &mark)) {
>         flow = mark_to_flow_find(pmd, mark);
>         if (OVS_LIKELY(flow)) {
>             ...
>         }
>     }
> }
>
> miniflow:
>     miniflow_extract();

OK, I will refactor to introduce usage of netdev_is_flow_api_enabled() and
clarify. Thanks.

>
>> --
>> 2.28.0.546.g385c171
>>
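To make the agreed follow-up concrete, one way the refactored branch could
look is sketched below. This is illustrative only, not the committed code: it
splices the reviewer's netdev_is_flow_api_enabled() gating into the hunk from
the posted patch, and it relies on the surrounding dfc_processing() context
(pmd, packet, port_no, mark, flow, tcp_flags, key) rather than compiling on
its own.

    /* Sketch: the HW recover call is attempted only when hardware offload
     * is enabled, so the common no-offload case pays no extra per-packet
     * port lookup or recover call. */
    if (*recirc_depth_get() == 0) {
        tcp_flags = parse_tcp_flags(packet);
        if (netdev_is_flow_api_enabled()) {
            struct tx_port *p = pmd_send_port_cache_lookup(pmd, port_no);

            /* A zero return from netdev_hw_miss_packet_recover() means the
             * packet state was restored after partial HW processing; such a
             * packet continues with full (miniflow) processing. */
            if (OVS_UNLIKELY(p && !netdev_hw_miss_packet_recover(
                                       p->port->netdev, packet))) {
                goto miniflow;
            }
        }
        if (dp_packet_has_flow_mark(packet, &mark)) {
            flow = mark_to_flow_find(pmd, mark);
            if (OVS_LIKELY(flow)) {
                /* Batch or enqueue to flow_map exactly as in the diff. */
                continue;
            }
        }
    }

miniflow:
    miniflow_extract(packet, &key->mf);

Compared with the posted v1 hunk, the only intended behavioral difference is
that the port-cache lookup and the recover call are skipped entirely when
hardware offload is disabled.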
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index e3fd0a07f..09e86631e 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -7036,6 +7036,10 @@ smc_lookup_batch(struct dp_netdev_pmd_thread *pmd,
     pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SMC_HIT, n_smc_hit);
 }
 
+static struct tx_port *
+pmd_send_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
+                           odp_port_t port_no);
+
 /* Try to process all ('cnt') the 'packets' using only the datapath flow cache
  * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
  * miniflow is copied into 'keys' and the packet pointer is moved at the
@@ -7099,23 +7103,33 @@ dfc_processing(struct dp_netdev_pmd_thread *pmd,
             pkt_metadata_init(&packet->md, port_no);
         }
 
-        if ((*recirc_depth_get() == 0) &&
-            dp_packet_has_flow_mark(packet, &mark)) {
-            flow = mark_to_flow_find(pmd, mark);
-            if (OVS_LIKELY(flow)) {
-                tcp_flags = parse_tcp_flags(packet);
-                if (OVS_LIKELY(batch_enable)) {
-                    dp_netdev_queue_batches(packet, flow, tcp_flags, batches,
-                                            n_batches);
-                } else {
-                    /* Flow batching should be performed only after fast-path
-                     * processing is also completed for packets with emc miss
-                     * or else it will result in reordering of packets with
-                     * same datapath flows. */
-                    packet_enqueue_to_flow_map(packet, flow, tcp_flags,
-                                               flow_map, map_cnt++);
+        if (*recirc_depth_get() == 0) {
+            /* Restore the packet if HW processing was terminated before
+             * completion.
+             */
+            struct tx_port *p;
+
+            tcp_flags = parse_tcp_flags(packet);
+            p = pmd_send_port_cache_lookup(pmd, port_no);
+            if (!p || netdev_hw_miss_packet_recover(p->port->netdev, packet)) {
+                if (dp_packet_has_flow_mark(packet, &mark)) {
+                    flow = mark_to_flow_find(pmd, mark);
+                    if (OVS_LIKELY(flow)) {
+                        if (OVS_LIKELY(batch_enable)) {
+                            dp_netdev_queue_batches(packet, flow, tcp_flags,
+                                                    batches, n_batches);
+                        } else {
+                            /* Flow batching should be performed only after
+                             * fast-path processing is also completed for
+                             * packets with emc miss or else it will result in
+                             * reordering of packets with same datapath flows.
+                             */
+                            packet_enqueue_to_flow_map(packet, flow, tcp_flags,
+                                                       flow_map, map_cnt++);
+                        }
+                        continue;
+                    }
+                }
                 }
-                continue;
             }
         }
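A quick way to read the "!p || netdev_hw_miss_packet_recover(...)" condition
in the hunk above: a zero return (state recovered) routes the packet to full
miniflow processing, while a missing tx_port or a non-zero return (nothing
recovered) allows the mark-to-flow shortcut. The standalone toy program below
models just that decision tree; all names and the zero-on-success convention
are stand-ins mirroring how the patch uses the API, not OVS code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the patched dfc_processing() branch.  'have_port' stands
     * for pmd_send_port_cache_lookup() finding a tx_port, 'recover_rc' for
     * netdev_hw_miss_packet_recover()'s return (0 = state recovered), and
     * 'mark_hit' for mark_to_flow_find() succeeding. */
    static const char *
    classify(bool have_port, int recover_rc, bool mark_hit)
    {
        if (!have_port || recover_rc) {
            if (mark_hit) {
                return "flow-mark hit: queue to batch and continue";
            }
        }
        /* Recovered packets and mark misses take the full path. */
        return "miniflow extraction (full fast-path classification)";
    }

    int
    main(void)
    {
        printf("recovered by HW:       %s\n", classify(true, 0, true));
        printf("no recovery, mark hit: %s\n", classify(true, 1, true));
        printf("no port, no mark:      %s\n", classify(false, 1, false));
        return 0;
    }

Note the first case: even with a valid-looking mark, a successfully recovered
packet bypasses the mark lookup, which is exactly what the patched condition
encodes.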