Patchwork Lucid CVE-2012-3412

login
register
mail settings
Submitter Herton Ronaldo Krzesinski
Date Aug. 24, 2012, 7:50 p.m.
Message ID <20120824195052.GC3072@herton-Z68MA-D2H-B3>
Download mbox | patch
Permalink /patch/179895/
State New
Headers show

Comments

Herton Ronaldo Krzesinski - Aug. 24, 2012, 7:50 p.m.
On Fri, Aug 24, 2012 at 09:11:22AM -0600, Tim Gardner wrote:
> On 08/24/2012 09:05 AM, Herton Ronaldo Krzesinski wrote:
> > On Fri, Aug 24, 2012 at 07:58:34AM -0600, Tim Gardner wrote:
> >>  static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
> >>  {
> >> +	if (skb_is_gso(skb) &&
> >> +		skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
> >> +		return 0;
> > 
> > Shouldn't be return 1 here? If the condition is true, we would clear the
> > flags from features. If flags are cleared, when calling skb_gso_ok:
> > 
> > net_gso_ok would always return 0
> > skb_gso_ok would always return 0
> > netif_needs_gso returns 1 because it does !skb_gso_ok
> > 
> > Unless I'm missing something here. Anyway, hard to read these functions...
> > I think just copying/clearing the flags and passing through skb_gso_ok
> > would be better.
> > 
> 
> I guess I'm confused about when the flag is set. I was assuming if GSO
> was set, then the driver handled 'generic segmentation offload'. Aren't
> we checking for error conditions, e.g., if there are more segments than
> the driver can handle, then _don't_ do GSO ?

Ok, forget the last thing I said on IRC about sfc: it is the only one
limiting gso_max_segs, while the other drivers that support GSO continue
to do so and use the default maximum of GSO_MAX_SEGS as defined in the
patch. As far as I understand, only sfc actually needs this and is
affected.

About the patch, if we are still pursuing this, I expect the following
diff should work for Lucid. I think I handled what's needed. I noticed
skb_gso_ok was used at dev_gso_segment()->skb_gso_segment(), so we
can't patch netif_needs_gso directly; also, since skb_gso_ok is used by
protocol code, we couldn't clear the flags there (not sure if there
would be a side effect in doing so).

> 
> rtg
> -- 
> Tim Gardner tim.gardner@canonical.com
>

Patch

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1a11d95..422001f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -486,7 +486,7 @@  static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (frags > 1 && !xennet_can_sg(dev)) ||
-		     netif_needs_gso(dev, skb))) {
+		     netif_needs_gso(skb, netif_skb_features(dev, skb)))) {
 		spin_unlock_irq(&np->tx_lock);
 		goto drop;
 	}
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ea6187c..9216ff6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -907,6 +907,8 @@  struct net_device
 	/* for setting kernel sock attribute on TCP connection setup */
 #define GSO_MAX_SIZE		65536
 	unsigned int		gso_max_size;
+#define GSO_MAX_SEGS		65535
+	u16			gso_max_segs;
 
 #ifdef CONFIG_DCB
 	/* Data Center Bridging netlink ops */
@@ -1933,10 +1935,10 @@  static inline int skb_gso_ok(struct sk_buff *skb, int features)
 	       (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+static inline int netif_needs_gso(struct sk_buff *skb, int features)
 {
 	return skb_is_gso(skb) &&
-	       (!skb_gso_ok(skb, dev->features) ||
+	       (!skb_gso_ok(skb, features) ||
 		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index f32f98a..5753bfe 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1516,6 +1516,17 @@  static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
 	return false;
 }
 
+int netif_skb_features(struct net_device *dev, struct sk_buff *skb)
+{
+	int features = dev->features;
+
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+		features &= ~NETIF_F_GSO_MASK;
+
+	return features;
+}
+EXPORT_SYMBOL(netif_skb_features);
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
@@ -1682,13 +1693,12 @@  static void dev_gso_skb_destructor(struct sk_buff *skb)
  *	This function segments the given skb and stores the list of segments
  *	in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
 {
 	struct net_device *dev = skb->dev;
 	struct sk_buff *segs;
-	int features = dev->features & ~(illegal_highdma(dev, skb) ?
-					 NETIF_F_SG : 0);
 
+	features &= ~(illegal_highdma(dev, skb) ? NETIF_F_SG : 0);
 	segs = skb_gso_segment(skb, features);
 
 	/* Verifying header integrity only. */
@@ -1712,11 +1722,15 @@  int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 	int rc;
 
 	if (likely(!skb->next)) {
+		int features;
+
 		if (!list_empty(&ptype_all))
 			dev_queue_xmit_nit(skb, dev);
 
-		if (netif_needs_gso(dev, skb)) {
-			if (unlikely(dev_gso_segment(skb)))
+		features = netif_skb_features(dev, skb);
+
+		if (netif_needs_gso(skb, features)) {
+			if (unlikely(dev_gso_segment(skb, features)))
 				goto out_kfree_skb;
 			if (skb->next)
 				goto gso;
@@ -1887,7 +1901,7 @@  int dev_queue_xmit(struct sk_buff *skb)
 	int rc = -ENOMEM;
 
 	/* GSO will handle the following emulations directly. */
-	if (netif_needs_gso(dev, skb))
+	if (netif_needs_gso(skb, netif_skb_features(dev, skb)))
 		goto gso;
 
 	if (skb_has_frags(skb) &&
@@ -5195,6 +5209,7 @@  struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	dev->real_num_tx_queues = queue_count;
 
 	dev->gso_max_size = GSO_MAX_SIZE;
+	dev->gso_max_segs = GSO_MAX_SEGS;
 
 	netdev_init_queues(dev);