[v5,2/5] sctp: Always pass skbs on a list to sctp_ulpq_tail_event().

Message ID 20190411.150157.1608261572973187963.davem@davemloft.net
State Accepted
Delegated to: David Miller
Series SCTP: Event skb list overhaul.

Commit Message

David Miller April 11, 2019, 10:01 p.m. UTC
This way we can simplify the logic and remove assumptions
about the implementation of skb lists.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 net/sctp/ulpqueue.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
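
The pattern the hunks below converge on is to wrap each event's skb in a stack-local sk_buff_head before delivery, so that a single event and a list of events look the same to the delivery path. A minimal sketch of that caller-side pattern, using a hypothetical wrapper name that is not part of this patch and assuming normal kernel context (<linux/skbuff.h>, <net/sctp/sctp.h>):

/* Sketch only: sctp_deliver_one() is a hypothetical wrapper; the real
 * callers changed by this patch open-code the same steps.
 */
static void sctp_deliver_one(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);			/* always start from a real list head */
	__skb_queue_tail(&temp, sctp_event2skb(event));	/* put the one skb on it */

	/* At this point in the series the callee still takes the event;
	 * building 'temp' in every caller is what allows later patches to
	 * hand over the whole list instead.
	 */
	sctp_ulpq_tail_event(ulpq, event);
}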

Comments

Marcelo Ricardo Leitner April 11, 2019, 10:18 p.m. UTC | #1
On Thu, Apr 11, 2019 at 03:01:57PM -0700, David Miller wrote:
> 
> This way we can simplify the logic and remove assumptions
> about the implementation of skb lists.
> 
> Signed-off-by: David S. Miller <davem@davemloft.net>

Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>

> ---
>  net/sctp/ulpqueue.c | 16 ++++++++++------
>  1 file changed, 10 insertions(+), 6 deletions(-)
> 
> diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
> index 0fecc1fb4ab7..b22f558adc49 100644
> --- a/net/sctp/ulpqueue.c
> +++ b/net/sctp/ulpqueue.c
> @@ -738,19 +738,19 @@ void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
>  static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
>  {
>  	struct sctp_ulpevent *event = NULL;
> -	struct sk_buff_head temp;
>  
>  	if (skb_queue_empty(&ulpq->reasm))
>  		return;
>  
>  	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
> -		/* Do ordering if needed.  */
> -		if (event->msg_flags & MSG_EOR) {
> -			skb_queue_head_init(&temp);
> -			__skb_queue_tail(&temp, sctp_event2skb(event));
> +		struct sk_buff_head temp;
> +
> +		skb_queue_head_init(&temp);
> +		__skb_queue_tail(&temp, sctp_event2skb(event));
>  
> +		/* Do ordering if needed.  */
> +		if (event->msg_flags & MSG_EOR)
>  			event = sctp_ulpq_order(ulpq, event);
> -		}
>  
>  		/* Send event to the ULP.  'event' is the
>  		 * sctp_ulpevent for  very first SKB on the  temp' list.
> @@ -1082,6 +1082,10 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
>  		event = sctp_ulpq_retrieve_first(ulpq);
>  		/* Send event to the ULP.   */
>  		if (event) {
> +			struct sk_buff_head temp;
> +
> +			skb_queue_head_init(&temp);
> +			__skb_queue_tail(&temp, sctp_event2skb(event));
>  			sctp_ulpq_tail_event(ulpq, event);
>  			sctp_ulpq_set_pd(ulpq);
>  			return;
> -- 
> 2.20.1
>
Patch

diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 0fecc1fb4ab7..b22f558adc49 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -738,19 +738,19 @@ void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 {
 	struct sctp_ulpevent *event = NULL;
-	struct sk_buff_head temp;
 
 	if (skb_queue_empty(&ulpq->reasm))
 		return;
 
 	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
-		/* Do ordering if needed.  */
-		if (event->msg_flags & MSG_EOR) {
-			skb_queue_head_init(&temp);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
+		struct sk_buff_head temp;
+
+		skb_queue_head_init(&temp);
+		__skb_queue_tail(&temp, sctp_event2skb(event));
 
+		/* Do ordering if needed.  */
+		if (event->msg_flags & MSG_EOR)
 			event = sctp_ulpq_order(ulpq, event);
-		}
 
 		/* Send event to the ULP.  'event' is the
 		 * sctp_ulpevent for  very first SKB on the  temp' list.
@@ -1082,6 +1082,10 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 		event = sctp_ulpq_retrieve_first(ulpq);
 		/* Send event to the ULP.   */
 		if (event) {
+			struct sk_buff_head temp;
+
+			skb_queue_head_init(&temp);
+			__skb_queue_tail(&temp, sctp_event2skb(event));
 			sctp_ulpq_tail_event(ulpq, event);
 			sctp_ulpq_set_pd(ulpq);
 			return;
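
The "remove assumptions about the implementation of skb lists" wording in the commit message is easiest to see from the consumer side: once every caller hands over a proper sk_buff_head, delivery can use the generic list helpers and needs no knowledge of how the skbs are chained internally. A sketch of such a list-based consumer, an assumption about where the series is headed rather than anything contained in this patch:

/* Sketch only; not part of this patch. A list-taking delivery function
 * could splice the per-event list onto the socket receive queue in one
 * step, leaving the source list empty and reinitialised.
 */
static void sctp_queue_event_list(struct sock *sk, struct sk_buff_head *skb_list)
{
	skb_queue_splice_tail_init(skb_list, &sk->sk_receive_queue);
	sk->sk_data_ready(sk);
}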