diff mbox series

[ovs-dev] ipf: Fix ovs ipf crash.

Message ID 20240522021957.2292-1-15310488637@163.com
State Superseded
Headers show
Series [ovs-dev] ipf: Fix ovs ipf crash. | expand

Checks

Context Check Description
ovsrobot/apply-robot warning apply and check: warning
ovsrobot/github-robot-_Build_and_Test success github build: passed
ovsrobot/intel-ovs-compilation success test: success

Commit Message

laixiangwu May 22, 2024, 2:19 a.m. UTC
Description:

When: 1) the fragment timeout is between 15 seconds and 25 seconds; and 2) 
the ipf_list has already received more than 32 fragments, while other 
fragments of the same large packet have not yet been received.

When both of the above conditions are met, ipf_dp_packet_batch_add 
returns false because the capacity of the packet batch (32 here) is 
exceeded, and the ipf_list will not be cleared. However, the 32 
fragment packets that were added to the packet batch will be processed normally. 
When subsequent fragments of the ipf_list are received, because the 
first 32 fragments have already been processed, relevant information 
about those processed fragment packets will be read, and therefore a 
crash will occur.
One solution is to not forward timed-out fragment packets in the above 
scenario, that is, to not add them to the packet batch, and to handle 
other scenarios according to the original logic.
---
 lib/ipf.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

Comments

0-day Robot May 22, 2024, 2:40 a.m. UTC | #1
Bleep bloop.  Greetings laixiangwu, I am a robot and I have tried out your patch.
Thanks for your contribution.

I encountered some error that I wasn't expecting.  See the details below.


checkpatch:
ERROR: Author laixiangwu <15310488637@163.com> needs to sign off.
Lines checked: 73, Warnings: 0, Errors: 1


Please check this out.  If you feel there has been an error, please email aconole@redhat.com

Thanks,
0-day Robot
Mike Pattrick May 22, 2024, 3:39 p.m. UTC | #2
Hello laixiangwu,

I happen to have a patch on the list right now for a similar sounding issue:

https://patchwork.ozlabs.org/project/openvswitch/patch/20240516153832.153496-1-mkp@redhat.com/

Do you happen to have a stack trace available for this crash? That
could help determine if the crash is triggered at the same location.

I'm not sure I like the approach of not adding any fragments to the
batch during expiration, as this changes how packets are processed
currently and may be an unexpected behaviour for end users. Would it
be better to only add a few to the current batch and add the remaining
to later batches?

Thanks,
M



On Tue, May 21, 2024 at 10:20 PM laixiangwu <15310488637@163.com> wrote:
>
> Description:
>
> when 1) The fragment timeout is between 15 seconds and 25 seconds; 2)
> The ipf_list currently has received more than 32 fragments, and there
> are other fragments of same big packet that have not been received.
>
> When the above two scenario conditions are met, due to exceeding the
> capacity of the packet batch(here is 32), ipf_dp_packet_batch_add
> returns false, and ipf_list will not be cleared. However, the 32
> fragments packets added to the packet batch will be processed normally.
> When receiving the subsequent fragments of the ipf_list, because the
> first 32 fragments have been processed, when processing subsequent
> fragment packets, relevant information about the processed fragment
> packets will be read,therefore will occur carsh.
> One solution is do not forward timeout fragment packets from the above
> scenarios, that is, do not add them to the packet batch, and handle
> other scenarios according to the original logic.
> ---
>  lib/ipf.c | 10 ++++------
>  1 file changed, 4 insertions(+), 6 deletions(-)
>
> diff --git a/lib/ipf.c b/lib/ipf.c
> index d45266374..9258173ab 100644
> --- a/lib/ipf.c
> +++ b/lib/ipf.c
> @@ -1011,7 +1011,7 @@ ipf_purge_list_check(struct ipf *ipf, struct ipf_list *ipf_list,
>  }
>
>  /* Does the packet batch management and common accounting work associated
> - * with 'ipf_send_completed_frags()' and 'ipf_send_expired_frags()'. */
> + * with 'ipf_send_completed_frags()'. */
>  static bool
>  ipf_send_frags_in_list(struct ipf *ipf, struct ipf_list *ipf_list,
>                         struct dp_packet_batch *pb,
> @@ -1076,8 +1076,7 @@ ipf_send_completed_frags(struct ipf *ipf, struct dp_packet_batch *pb,
>   * a packet batch to be processed by the calling application, typically
>   * conntrack. Also cleans up the list context when it is empty.*/
>  static void
> -ipf_send_expired_frags(struct ipf *ipf, struct dp_packet_batch *pb,
> -                       long long now, bool v6)
> +ipf_clean_expired_frags(struct ipf *ipf, long long now)
>  {
>      enum {
>          /* Very conservative, due to DOS probability. */
> @@ -1099,8 +1098,7 @@ ipf_send_expired_frags(struct ipf *ipf, struct dp_packet_batch *pb,
>              break;
>          }
>
> -        if (ipf_send_frags_in_list(ipf, ipf_list, pb, IPF_FRAG_EXPIRY_LIST,
> -                                   v6, now)) {
> +        if (ipf_purge_list_check(ipf, ipf_list, now)) {
>              ipf_expiry_list_clean(&ipf->frag_lists, ipf_list);
>              lists_removed++;
>          } else {
> @@ -1249,7 +1247,7 @@ ipf_postprocess_conntrack(struct ipf *ipf, struct dp_packet_batch *pb,
>          bool v6 = dl_type == htons(ETH_TYPE_IPV6);
>          ipf_post_execute_reass_pkts(ipf, pb, v6);
>          ipf_send_completed_frags(ipf, pb, now, v6);
> -        ipf_send_expired_frags(ipf, pb, now, v6);
> +        ipf_clean_expired_frags(ipf, now);
>      }
>  }
>
> --
> 2.31.1.windows.1
>
> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
diff mbox series

Patch

diff --git a/lib/ipf.c b/lib/ipf.c
index d45266374..9258173ab 100644
--- a/lib/ipf.c
+++ b/lib/ipf.c
@@ -1011,7 +1011,7 @@  ipf_purge_list_check(struct ipf *ipf, struct ipf_list *ipf_list,
 }
 
 /* Does the packet batch management and common accounting work associated
- * with 'ipf_send_completed_frags()' and 'ipf_send_expired_frags()'. */
+ * with 'ipf_send_completed_frags()'. */
 static bool
 ipf_send_frags_in_list(struct ipf *ipf, struct ipf_list *ipf_list,
                        struct dp_packet_batch *pb,
@@ -1076,8 +1076,7 @@  ipf_send_completed_frags(struct ipf *ipf, struct dp_packet_batch *pb,
  * a packet batch to be processed by the calling application, typically
  * conntrack. Also cleans up the list context when it is empty.*/
 static void
-ipf_send_expired_frags(struct ipf *ipf, struct dp_packet_batch *pb,
-                       long long now, bool v6)
+ipf_clean_expired_frags(struct ipf *ipf, long long now)
 {
     enum {
         /* Very conservative, due to DOS probability. */
@@ -1099,8 +1098,7 @@  ipf_send_expired_frags(struct ipf *ipf, struct dp_packet_batch *pb,
             break;
         }
 
-        if (ipf_send_frags_in_list(ipf, ipf_list, pb, IPF_FRAG_EXPIRY_LIST,
-                                   v6, now)) {
+        if (ipf_purge_list_check(ipf, ipf_list, now)) {
             ipf_expiry_list_clean(&ipf->frag_lists, ipf_list);
             lists_removed++;
         } else {
@@ -1249,7 +1247,7 @@  ipf_postprocess_conntrack(struct ipf *ipf, struct dp_packet_batch *pb,
         bool v6 = dl_type == htons(ETH_TYPE_IPV6);
         ipf_post_execute_reass_pkts(ipf, pb, v6);
         ipf_send_completed_frags(ipf, pb, now, v6);
-        ipf_send_expired_frags(ipf, pb, now, v6);
+        ipf_clean_expired_frags(ipf, now);
     }
 }