Patchwork [net,V2,1/2] tuntap: do not zerocopy if iov needs more pages than MAX_SKB_FRAGS

login
register
mail settings
Submitter Jason Wang
Date July 18, 2013, 2:55 a.m.
Message ID <1374116116-43844-1-git-send-email-jasowang@redhat.com>
Download mbox | patch
Permalink /patch/259997/
State Accepted
Delegated to: David Miller
Headers show

Comments

Jason Wang - July 18, 2013, 2:55 a.m.
We try to linearize part of the skb when the number of iov is greater than
MAX_SKB_FRAGS. This is not enough since each single vector may occupy more than
one page, so zerocopy_sg_fromiovec() may still fail and may break the guest
network.

Solve this problem by calculating the pages needed for the iov before trying to do
zerocopy, and switch to copying instead of zerocopy if it needs more than
MAX_SKB_FRAGS.

This is done through introducing a new helper to count the pages for iov, and
call uarg->callback() manually when switching from zerocopy to copy to notify
vhost.

We can do further optimization on top.

The bug was introduced by commit 0690899b4d4501b3505be069b9a687e68ccbe15b
(tun: experimental zero copy tx support)

Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>

---
The patch is needed for -stable.
---
 drivers/net/tun.c |   62 ++++++++++++++++++++++++++++++++--------------------
 1 files changed, 38 insertions(+), 24 deletions(-)
Michael S. Tsirkin - July 19, 2013, 5:03 a.m.
On Thu, Jul 18, 2013 at 10:55:15AM +0800, Jason Wang wrote:
> We try to linearize part of the skb when the number of iov is greater than
> MAX_SKB_FRAGS. This is not enough since each single vector may occupy more than
> one pages, so zerocopy_sg_fromiovec() may still fail and may break the guest
> network.
> 
> Solve this problem by calculate the pages needed for iov before trying to do
> zerocopy and switch to use copy instead of zerocopy if it needs more than
> MAX_SKB_FRAGS.
> 
> This is done through introducing a new helper to count the pages for iov, and
> call uarg->callback() manually when switching from zerocopy to copy to notify
> vhost.
> 
> We can do further optimization on top.
> 
> The bug were introduced from commit 0690899b4d4501b3505be069b9a687e68ccbe15b
> (tun: experimental zero copy tx support)
> 
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Signed-off-by: Jason Wang <jasowang@redhat.com>


Acked-by: Michael S. Tsirkin <mst@redhat.com>

> ---
> The patch is needed fot -stable.
> ---
>  drivers/net/tun.c |   62 ++++++++++++++++++++++++++++++++--------------------
>  1 files changed, 38 insertions(+), 24 deletions(-)
> 
> diff --git a/drivers/net/tun.c b/drivers/net/tun.c
> index 5cdcf92..db690a3 100644
> --- a/drivers/net/tun.c
> +++ b/drivers/net/tun.c
> @@ -1035,6 +1035,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
>  	return 0;
>  }
>  
> +static unsigned long iov_pages(const struct iovec *iv, int offset,
> +			       unsigned long nr_segs)
> +{
> +	unsigned long seg, base;
> +	int pages = 0, len, size;
> +
> +	while (nr_segs && (offset >= iv->iov_len)) {
> +		offset -= iv->iov_len;
> +		++iv;
> +		--nr_segs;
> +	}
> +
> +	for (seg = 0; seg < nr_segs; seg++) {
> +		base = (unsigned long)iv[seg].iov_base + offset;
> +		len = iv[seg].iov_len - offset;
> +		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
> +		pages += size;
> +		offset = 0;
> +	}
> +
> +	return pages;
> +}
> +
>  /* Get packet from user space buffer */
>  static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
>  			    void *msg_control, const struct iovec *iv,
> @@ -1082,32 +1105,18 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
>  			return -EINVAL;
>  	}
>  
> -	if (msg_control)
> -		zerocopy = true;
> -
> -	if (zerocopy) {
> -		/* Userspace may produce vectors with count greater than
> -		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
> -		 * to let the rest of data to be fit in the frags.
> -		 */
> -		if (count > MAX_SKB_FRAGS) {
> -			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
> -			if (copylen < offset)
> -				copylen = 0;
> -			else
> -				copylen -= offset;
> -		} else
> -				copylen = 0;
> -		/* There are 256 bytes to be copied in skb, so there is enough
> -		 * room for skb expand head in case it is used.
> +	if (msg_control) {
> +		/* There are 256 bytes to be copied in skb, so there is
> +		 * enough room for skb expand head in case it is used.
>  		 * The rest of the buffer is mapped from userspace.
>  		 */
> -		if (copylen < gso.hdr_len)
> -			copylen = gso.hdr_len;
> -		if (!copylen)
> -			copylen = GOODCOPY_LEN;
> +		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
>  		linear = copylen;
> -	} else {
> +		if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
> +			zerocopy = true;
> +	}
> +
> +	if (!zerocopy) {
>  		copylen = len;
>  		linear = gso.hdr_len;
>  	}
> @@ -1121,8 +1130,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
>  
>  	if (zerocopy)
>  		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
> -	else
> +	else {
>  		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
> +		if (!err && msg_control) {
> +			struct ubuf_info *uarg = msg_control;
> +			uarg->callback(uarg, false);
> +		}
> +	}
>  
>  	if (err) {
>  		tun->dev->stats.rx_dropped++;
> -- 
> 1.7.1
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Michael S. Tsirkin - July 30, 2013, 11:41 a.m.
On Fri, Jul 19, 2013 at 08:03:35AM +0300, Michael S. Tsirkin wrote:
> On Thu, Jul 18, 2013 at 10:55:15AM +0800, Jason Wang wrote:
> > We try to linearize part of the skb when the number of iov is greater than
> > MAX_SKB_FRAGS. This is not enough since each single vector may occupy more than
> > one pages, so zerocopy_sg_fromiovec() may still fail and may break the guest
> > network.
> > 
> > Solve this problem by calculate the pages needed for iov before trying to do
> > zerocopy and switch to use copy instead of zerocopy if it needs more than
> > MAX_SKB_FRAGS.
> > 
> > This is done through introducing a new helper to count the pages for iov, and
> > call uarg->callback() manually when switching from zerocopy to copy to notify
> > vhost.
> > 
> > We can do further optimization on top.
> > 
> > The bug were introduced from commit 0690899b4d4501b3505be069b9a687e68ccbe15b
> > (tun: experimental zero copy tx support)
> > 
> > Cc: Michael S. Tsirkin <mst@redhat.com>
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
> 
> 
> Acked-by: Michael S. Tsirkin <mst@redhat.com>

Was this patch dropped?
Any objections to merging it for 3.11 and stable?

> > ---
> > The patch is needed fot -stable.
> > ---
> >  drivers/net/tun.c |   62 ++++++++++++++++++++++++++++++++--------------------
> >  1 files changed, 38 insertions(+), 24 deletions(-)
> > 
> > diff --git a/drivers/net/tun.c b/drivers/net/tun.c
> > index 5cdcf92..db690a3 100644
> > --- a/drivers/net/tun.c
> > +++ b/drivers/net/tun.c
> > @@ -1035,6 +1035,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
> >  	return 0;
> >  }
> >  
> > +static unsigned long iov_pages(const struct iovec *iv, int offset,
> > +			       unsigned long nr_segs)
> > +{
> > +	unsigned long seg, base;
> > +	int pages = 0, len, size;
> > +
> > +	while (nr_segs && (offset >= iv->iov_len)) {
> > +		offset -= iv->iov_len;
> > +		++iv;
> > +		--nr_segs;
> > +	}
> > +
> > +	for (seg = 0; seg < nr_segs; seg++) {
> > +		base = (unsigned long)iv[seg].iov_base + offset;
> > +		len = iv[seg].iov_len - offset;
> > +		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
> > +		pages += size;
> > +		offset = 0;
> > +	}
> > +
> > +	return pages;
> > +}
> > +
> >  /* Get packet from user space buffer */
> >  static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> >  			    void *msg_control, const struct iovec *iv,
> > @@ -1082,32 +1105,18 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> >  			return -EINVAL;
> >  	}
> >  
> > -	if (msg_control)
> > -		zerocopy = true;
> > -
> > -	if (zerocopy) {
> > -		/* Userspace may produce vectors with count greater than
> > -		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
> > -		 * to let the rest of data to be fit in the frags.
> > -		 */
> > -		if (count > MAX_SKB_FRAGS) {
> > -			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
> > -			if (copylen < offset)
> > -				copylen = 0;
> > -			else
> > -				copylen -= offset;
> > -		} else
> > -				copylen = 0;
> > -		/* There are 256 bytes to be copied in skb, so there is enough
> > -		 * room for skb expand head in case it is used.
> > +	if (msg_control) {
> > +		/* There are 256 bytes to be copied in skb, so there is
> > +		 * enough room for skb expand head in case it is used.
> >  		 * The rest of the buffer is mapped from userspace.
> >  		 */
> > -		if (copylen < gso.hdr_len)
> > -			copylen = gso.hdr_len;
> > -		if (!copylen)
> > -			copylen = GOODCOPY_LEN;
> > +		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
> >  		linear = copylen;
> > -	} else {
> > +		if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
> > +			zerocopy = true;
> > +	}
> > +
> > +	if (!zerocopy) {
> >  		copylen = len;
> >  		linear = gso.hdr_len;
> >  	}
> > @@ -1121,8 +1130,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> >  
> >  	if (zerocopy)
> >  		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
> > -	else
> > +	else {
> >  		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
> > +		if (!err && msg_control) {
> > +			struct ubuf_info *uarg = msg_control;
> > +			uarg->callback(uarg, false);
> > +		}
> > +	}
> >  
> >  	if (err) {
> >  		tun->dev->stats.rx_dropped++;
> > -- 
> > 1.7.1
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
David Miller - July 30, 2013, 7:55 p.m.
From: "Michael S. Tsirkin" <mst@redhat.com>
Date: Tue, 30 Jul 2013 14:41:13 +0300

> Was this patch dropped?

No, it's commit 885291761dba2bfe04df4c0f7bb75e4c920ab82e

> Any objections to merging it for 3.11 and stable?

No objections, queued up for -stable.
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Patch

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5cdcf92..db690a3 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1035,6 +1035,29 @@  static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
 	return 0;
 }
 
+static unsigned long iov_pages(const struct iovec *iv, int offset,
+			       unsigned long nr_segs)
+{
+	unsigned long seg, base;
+	int pages = 0, len, size;
+
+	while (nr_segs && (offset >= iv->iov_len)) {
+		offset -= iv->iov_len;
+		++iv;
+		--nr_segs;
+	}
+
+	for (seg = 0; seg < nr_segs; seg++) {
+		base = (unsigned long)iv[seg].iov_base + offset;
+		len = iv[seg].iov_len - offset;
+		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+		pages += size;
+		offset = 0;
+	}
+
+	return pages;
+}
+
 /* Get packet from user space buffer */
 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			    void *msg_control, const struct iovec *iv,
@@ -1082,32 +1105,18 @@  static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EINVAL;
 	}
 
-	if (msg_control)
-		zerocopy = true;
-
-	if (zerocopy) {
-		/* Userspace may produce vectors with count greater than
-		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
-		 * to let the rest of data to be fit in the frags.
-		 */
-		if (count > MAX_SKB_FRAGS) {
-			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
-			if (copylen < offset)
-				copylen = 0;
-			else
-				copylen -= offset;
-		} else
-				copylen = 0;
-		/* There are 256 bytes to be copied in skb, so there is enough
-		 * room for skb expand head in case it is used.
+	if (msg_control) {
+		/* There are 256 bytes to be copied in skb, so there is
+		 * enough room for skb expand head in case it is used.
 		 * The rest of the buffer is mapped from userspace.
 		 */
-		if (copylen < gso.hdr_len)
-			copylen = gso.hdr_len;
-		if (!copylen)
-			copylen = GOODCOPY_LEN;
+		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
 		linear = copylen;
-	} else {
+		if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
+			zerocopy = true;
+	}
+
+	if (!zerocopy) {
 		copylen = len;
 		linear = gso.hdr_len;
 	}
@@ -1121,8 +1130,13 @@  static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
 	if (zerocopy)
 		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
-	else
+	else {
 		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
+		if (!err && msg_control) {
+			struct ubuf_info *uarg = msg_control;
+			uarg->callback(uarg, false);
+		}
+	}
 
 	if (err) {
 		tun->dev->stats.rx_dropped++;