[net,3/4] xen-netback: Fix releasing header slot on error path

Message ID 1405624192-21722-4-git-send-email-zoltan.kiss@citrix.com
State Superseded, archived
Delegated to: David Miller

Commit Message

Zoltan Kiss July 17, 2014, 7:09 p.m. UTC
This patch makes xenvif_tx_check_gop() aware that the first frag and the header
might share the same ring slot. That could happen if the first slot is bigger
than MAX_SKB_LEN, so the error path might release that slot twice or never,
depending on the error scenario.
xenvif_idx_release is also removed from xenvif_idx_unmap, and called separately.
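
For illustration, a condensed sketch of the two broken scenarios (distilled
from the hunks below; not the literal pre-patch code):

	/* Scenario A: the header's grant copy fails while the first frag
	 * shares its slot and mapped fine.  Pre-patch, the header error
	 * path released the slot unconditionally...
	 */
	xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
	/* ...and invalidating that first frag later released the same
	 * pending_idx a second time via xenvif_idx_unmap().
	 */

	/* Scenario B: the header copy succeeds but a frag mapping fails.
	 * Pre-patch, nothing on this path released the header's own slot,
	 * so the frontend never got a response for it.
	 */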

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Reported-by: Armin Zentai <armin.zentai@ezit.hu>
Cc: netdev@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
---

Comments

Wei Liu July 18, 2014, 3:25 p.m. UTC | #1
On Thu, Jul 17, 2014 at 08:09:51PM +0100, Zoltan Kiss wrote:
> This patch makes xenvif_tx_check_gop() aware that the first frag and the header
> might share the same ring slot. That could happen if the first slot is bigger
> than MAX_SKB_LEN, so the error path might release that slot twice or never,

I guess you mean PKT_PROT_LEN.

Just one question: how come we didn't come across this with the copying
backend? Comparing txreq.size against PKT_PROT_LEN is not new in the
mapping backend.

> depending on the error scenario.
> xenvif_idx_release is also removed from xenvif_idx_unmap, and called separately.
> 
> Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
> Reported-by: Armin Zentai <armin.zentai@ezit.hu>
> Cc: netdev@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> Cc: xen-devel@lists.xenproject.org
> ---
> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
> index e9ffb05..938d5a9 100644
> --- a/drivers/net/xen-netback/netback.c
> +++ b/drivers/net/xen-netback/netback.c
> @@ -1039,6 +1039,8 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
>  	 */
>  	struct skb_shared_info *first_shinfo = NULL;
>  	int nr_frags = shinfo->nr_frags;
> +	const bool sharedslot = nr_frags &&
> +				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
>  	int i, err;
>  
>  	/* Check status of header. */
> @@ -1051,7 +1053,10 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
>  				   (*gopp_copy)->status,
>  				   pending_idx,
>  				   (*gopp_copy)->source.u.ref);
> -		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
> +		/* The first frag might still have this slot mapped */
> +		if (!sharedslot)
> +			xenvif_idx_release(queue, pending_idx,
> +					   XEN_NETIF_RSP_ERROR);
>  	}
>  
>  check_frags:
> @@ -1068,8 +1073,19 @@ check_frags:
>  						pending_idx,
>  						gop_map->handle);
>  			/* Had a previous error? Invalidate this fragment. */
> -			if (unlikely(err))
> +			if (unlikely(err)) {
>  				xenvif_idx_unmap(queue, pending_idx);
> +				/* If the mapping of the first frag was OK, but
> +				 * the header's copy failed, and they are
> +				 * sharing a slot, send an error
> +				 */
> +				if (i == 0 && sharedslot)
> +					xenvif_idx_release(queue, pending_idx,
> +							   XEN_NETIF_RSP_ERROR);
> +				else
> +					xenvif_idx_release(queue, pending_idx,
> +							   XEN_NETIF_RSP_OKAY);

I guess this is sort of OK, just a bit fragile. Couldn't think of a
better way to refactor this function. :-(

> +			}
>  			continue;
>  		}
>  
> @@ -1081,15 +1097,27 @@ check_frags:
>  				   gop_map->status,
>  				   pending_idx,
>  				   gop_map->ref);
> +

Stray blank line.

And did you miss a callsite of xenvif_idx_unmap in this function which
is added in your first patch?

Wei.
Zoltan Kiss July 18, 2014, 6:06 p.m. UTC | #2
On 18/07/14 16:25, Wei Liu wrote:
> On Thu, Jul 17, 2014 at 08:09:51PM +0100, Zoltan Kiss wrote:
>> This patch makes xenvif_tx_check_gop() aware that the first frag and the header
>> might share the same ring slot. That could happen if the first slot is bigger
>> than MAX_SKB_LEN, so the error path might release that slot twice or never,
>
> I guess you mean PKT_PROT_LEN.
Yes
>
> Just one question: how come we didn't come across this with the copying
> backend? Comparing txreq.size against PKT_PROT_LEN is not new in the
> mapping backend.
We had one grant copy for the header and the first frag in that case, 
and we skipped the first frag:

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

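(For context, condensed from the old copying-based netback: that start value
fed straight into the frag checking loop, so the shared slot was only ever
handled once.)

	for (i = start; i < nr_frags; i++) {
		/* check the grant copy status of frag i ... */
	}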

>
>> depending on the error scenario.
>> xenvif_idx_release is also removed from xenvif_idx_unmap, and called separately.
>>
>> Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
>> Reported-by: Armin Zentai <armin.zentai@ezit.hu>
>> Cc: netdev@vger.kernel.org
>> Cc: linux-kernel@vger.kernel.org
>> Cc: xen-devel@lists.xenproject.org
>> ---
>> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
>> index e9ffb05..938d5a9 100644
>> --- a/drivers/net/xen-netback/netback.c
>> +++ b/drivers/net/xen-netback/netback.c
>> @@ -1039,6 +1039,8 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
>>   	 */
>>   	struct skb_shared_info *first_shinfo = NULL;
>>   	int nr_frags = shinfo->nr_frags;
>> +	const bool sharedslot = nr_frags &&
>> +				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
>>   	int i, err;
>>
>>   	/* Check status of header. */
>> @@ -1051,7 +1053,10 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
>>   				   (*gopp_copy)->status,
>>   				   pending_idx,
>>   				   (*gopp_copy)->source.u.ref);
>> -		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
>> +		/* The first frag might still have this slot mapped */
>> +		if (!sharedslot)
>> +			xenvif_idx_release(queue, pending_idx,
>> +					   XEN_NETIF_RSP_ERROR);
>>   	}
>>
>>   check_frags:
>> @@ -1068,8 +1073,19 @@ check_frags:
>>   						pending_idx,
>>   						gop_map->handle);
>>   			/* Had a previous error? Invalidate this fragment. */
>> -			if (unlikely(err))
>> +			if (unlikely(err)) {
>>   				xenvif_idx_unmap(queue, pending_idx);
>> +				/* If the mapping of the first frag was OK, but
>> +				 * the header's copy failed, and they are
>> +				 * sharing a slot, send an error
>> +				 */
>> +				if (i == 0 && sharedslot)
>> +					xenvif_idx_release(queue, pending_idx,
>> +							   XEN_NETIF_RSP_ERROR);
>> +				else
>> +					xenvif_idx_release(queue, pending_idx,
>> +							   XEN_NETIF_RSP_OKAY);
>
> I guess this is sort of OK, just a bit fragile. Couldn't think of a
> better way to refactor this function. :-(
I was thinking a lot about how to refactor this whole thing, but I gave 
up too ...
>
>> +			}
>>   			continue;
>>   		}
>>
>> @@ -1081,15 +1097,27 @@ check_frags:
>>   				   gop_map->status,
>>   				   pending_idx,
>>   				   gop_map->ref);
>> +
>
> Stray blank line.
>
> And did you miss a callsite of xenvif_idx_unmap in this function which
> is added in your first patch?
Nope, the xenvif_idx_release is there
>
> Wei.
>

Patch

diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e9ffb05..938d5a9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1039,6 +1039,8 @@  static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 	 */
 	struct skb_shared_info *first_shinfo = NULL;
 	int nr_frags = shinfo->nr_frags;
+	const bool sharedslot = nr_frags &&
+				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
 	int i, err;
 
 	/* Check status of header. */
@@ -1051,7 +1053,10 @@  static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 				   (*gopp_copy)->status,
 				   pending_idx,
 				   (*gopp_copy)->source.u.ref);
-		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+		/* The first frag might still have this slot mapped */
+		if (!sharedslot)
+			xenvif_idx_release(queue, pending_idx,
+					   XEN_NETIF_RSP_ERROR);
 	}
 
 check_frags:
@@ -1068,8 +1073,19 @@  check_frags:
 						pending_idx,
 						gop_map->handle);
 			/* Had a previous error? Invalidate this fragment. */
-			if (unlikely(err))
+			if (unlikely(err)) {
 				xenvif_idx_unmap(queue, pending_idx);
+				/* If the mapping of the first frag was OK, but
+				 * the header's copy failed, and they are
+				 * sharing a slot, send an error
+				 */
+				if (i == 0 && sharedslot)
+					xenvif_idx_release(queue, pending_idx,
+							   XEN_NETIF_RSP_ERROR);
+				else
+					xenvif_idx_release(queue, pending_idx,
+							   XEN_NETIF_RSP_OKAY);
+			}
 			continue;
 		}
 
@@ -1081,15 +1097,27 @@  check_frags:
 				   gop_map->status,
 				   pending_idx,
 				   gop_map->ref);
+
 		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
 			continue;
-		/* First error: invalidate preceding fragments. */
+
+		/* First error: if the header hasn't shared a slot with the
+		 * first frag, release it as well.
+		 */
+		if (!sharedslot)
+			xenvif_idx_release(queue,
+					   XENVIF_TX_CB(skb)->pending_idx,
+					   XEN_NETIF_RSP_OKAY);
+
+		/* Invalidate preceding fragments of this skb. */
 		for (j = 0; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
 			xenvif_idx_unmap(queue, pending_idx);
+			xenvif_idx_release(queue, pending_idx,
+					   XEN_NETIF_RSP_OKAY);
 		}
 
 		/* And if we found the error while checking the frag_list, unmap
@@ -1099,6 +1127,8 @@  check_frags:
 			for (j = 0; j < first_shinfo->nr_frags; j++) {
 				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
 				xenvif_idx_unmap(queue, pending_idx);
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_OKAY);
 			}
 		}
 
@@ -1830,8 +1860,6 @@  void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 			   tx_unmap_op.status);
 		BUG();
 	}
-
-	xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
 }
 
 static inline int rx_work_todo(struct xenvif_queue *queue)
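
With the implicit release gone from xenvif_idx_unmap(), every invalidation
site above now pairs the two calls explicitly and picks the response status
itself, e.g.:

	xenvif_idx_unmap(queue, pending_idx);
	xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);

Unmapping (tearing down the grant mapping) and releasing (sending the
response and recycling the pending slot) are now separate steps, which is
what lets the error path choose XEN_NETIF_RSP_ERROR for the shared slot.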