www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
net: add recursion limit to GRO
author: Sabrina Dubroca <sd () queasysnail net>
Tue, 11 Oct 2016 23:35:06 +0000 (19:35 -0400)
committer: Chuck Anderson <chuck.anderson@oracle.com>
Thu, 13 Oct 2016 23:49:41 +0000 (16:49 -0700)
Orabug: 24829124
CVE: CVE-2016-7039

Currently, GRO can do unlimited recursion through the gro_receive
handlers.  This was fixed for tunneling protocols by limiting tunnel GRO
to one level with encap_mark, but both VLAN and TEB still have this
problem.  Thus, the kernel is vulnerable to a stack overflow, if we
receive a packet composed entirely of VLAN headers.

This patch adds a recursion counter to the GRO layer to prevent stack
overflow.  When a gro_receive function hits the recursion limit, GRO is
aborted for this skb and it is processed normally.

Fixes: 9b174d88c257 ("net: Add Transparent Ethernet Bridging GRO support.")
Fixes: 66e5133f19e9 ("vlan: Add GRO support for non hardware accelerated vlan")
Signed-off-by: Sabrina Dubroca <sd () queasysnail net>
Reviewed-by: Jiri Benc <jbenc () redhat com>
Acked-by: Hannes Frederic Sowa <hannes () stressinduktion org>
(cherry picked from commit e71f3b1fca2ae5d0ae9d9c1a02a93d52beaae322)

Signed-off-by: Brian Maly <brian.maly@oracle.com>
drivers/net/vxlan.c
include/linux/netdevice.h
net/core/dev.c
net/ethernet/eth.c
net/ipv4/af_inet.c
net/ipv4/fou.c
net/ipv4/geneve.c
net/ipv4/gre_offload.c
net/ipv4/udp_offload.c
net/ipv6/ip6_offload.c

index 21a0fbf1ed947a83506de920f7f61501457bfe68..4ddcb570858dc2713a4d5a9d36f26d699fcad5fe 100644 (file)
@@ -635,7 +635,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
                }
        }
 
-       pp = eth_gro_receive(head, skb);
+       pp = call_gro_receive(eth_gro_receive, head, skb);
 
 out:
        skb_gro_remcsum_cleanup(skb, &grc);
index 05b9a694e21312ad26beec7dfa0f32f719cc8c87..8b59df09ed83461eabcc06b07f472fedaf7f1b77 100644 (file)
@@ -1956,7 +1956,10 @@ struct napi_gro_cb {
        /* Used in foo-over-udp, set in udp[46]_gro_receive */
        u8      is_ipv6:1;
 
-       /* 7 bit hole */
+       /* Number of gro_receive callbacks this packet already went through */
+       u8 recursion_counter:4;
+
+       /* 3 bit hole */
 
        /* used to support CHECKSUM_COMPLETE for tunneling protocols */
        __wsum  csum;
@@ -1967,6 +1970,25 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+       return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+                                               struct sk_buff **head,
+                                               struct sk_buff *skb)
+{
+       if (gro_recursion_inc_test(skb)) {
+               NAPI_GRO_CB(skb)->flush |= 1;
+               return NULL;
+       }
+
+       return cb(head, skb);
+}
+
 struct packet_type {
        __be16                  type;   /* This is really htons(ether_type). */
        struct net_device       *dev;   /* NULL is wildcarded here           */
index aa82f9ab6a36d164769bf7c9633fcdfd5971466f..839094823b9d49eec8bd3ff462e2b9c256288b88 100644 (file)
@@ -4059,6 +4059,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                NAPI_GRO_CB(skb)->free = 0;
                NAPI_GRO_CB(skb)->udp_mark = 0;
                NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
+               NAPI_GRO_CB(skb)->recursion_counter = 0;
 
                /* Setup for GRO checksum validation */
                switch (skb->ip_summed) {
index f3bad41d725f449f91d0b1b4f7119a9c9660e976..76f8389eacd27ac29624a6e2b08c4d597aaa460d 100644 (file)
@@ -434,7 +434,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, sizeof(*eh));
        skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index 8b47a4d79d040e39e592d3583affb7fec2d19f3d..cc45bc98a4f622cab5fc0f3c20c7a975a425b787 100644 (file)
@@ -1372,7 +1372,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
        skb_gro_pull(skb, sizeof(*iph));
        skb_set_transport_header(skb, skb_gro_offset(skb));
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index 34968cd5c1464bf896ba1a2130fc223c50973499..a733462774f86457d315ed78fa2c707af7b98e08 100644 (file)
@@ -187,7 +187,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
@@ -354,7 +354,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
        if (WARN_ON(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index 8986e63f3bda61a6c8ba980c050b96ec90625107..7bfe23b6bf281411225e86355ef9048dcd49e744 100644 (file)
@@ -203,7 +203,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, gh_len);
        skb_gro_postpull_rcsum(skb, gh, gh_len);
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index 5aa46d4b44efb99702ccd89005528f20ae422a0e..205569610cd98a2fcecb21b73c1a5b3bb6f0380c 100644 (file)
@@ -213,7 +213,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
        /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
        skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index f9386160cbee0288e294ea2cd8ba3b5be65cdbf6..1f0bc70558f4764fc3ad62f06eeb1b17736d0f87 100644 (file)
@@ -339,8 +339,14 @@ unflush:
        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
        NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-       pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-                                                    uo_priv->offload);
+
+       if (gro_recursion_inc_test(skb)) {
+               flush = 1;
+               pp = NULL;
+       } else {
+               pp = uo_priv->offload->callbacks.gro_receive(head, skb,
+                                                            uo_priv->offload);
+       }
 
 out_unlock:
        rcu_read_unlock();
index e893cd18612fcdc9e8577f0e060ffd32b5659eea..84819acfa4a5f21bb96f4c55ce4e4dc658ffacfb 100644 (file)
@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
        skb_gro_postpull_rcsum(skb, iph, nlen);
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();