void udp_set_csum(bool nocheck, struct sk_buff *skb,
                  __be32 saddr, __be32 daddr, int len);
 
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+                                struct udphdr *uh);
+int udp_gro_complete(struct sk_buff *skb, int nhoff);
+
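+/* Return a pointer to the UDP header of a packet being processed by
+ * GRO.  Try the frag0 fast path first; if the header is not directly
+ * accessible there, fall back to skb_gro_header_slow(), which may pull
+ * header data and can fail.  Callers must check for a NULL return.
+ */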
+static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+{
+       struct udphdr *uh;
+       unsigned int hlen, off;
+
+       off  = skb_gro_offset(skb);
+       hlen = off + sizeof(*uh);
+       uh   = skb_gro_header_fast(skb, off);
+       if (skb_gro_header_hard(skb, hlen))
+               uh = skb_gro_header_slow(skb, hlen, off);
+
+       return uh;
+}
+
 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
 static inline void udp_lib_hash(struct sock *sk)
 {
 
 }
 EXPORT_SYMBOL(udp_del_offload);
 
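+/* Protocol-agnostic UDP GRO: the caller has already validated the
+ * checksum and located the UDP header.  Only flows destined to a port
+ * with a registered udp_offload are coalesced; matching beyond the UDP
+ * header is deferred to that offload's gro_receive() callback.
+ */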
-static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+                                struct udphdr *uh)
 {
        struct udp_offload_priv *uo_priv;
        struct sk_buff *p, **pp = NULL;
-       struct udphdr *uh, *uh2;
-       unsigned int hlen, off;
+       struct udphdr *uh2;
+       unsigned int off = skb_gro_offset(skb);
        int flush = 1;
 
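+       /* Flush right away if this skb already passed through UDP GRO
+        * once (nested UDP tunnels are not coalesced), or if the caller
+        * could not validate the checksum of a non-encapsulated packet.
+        */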
        if (NAPI_GRO_CB(skb)->udp_mark ||
-           (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
+           (!skb->encapsulation && !NAPI_GRO_CB(skb)->csum_valid))
                goto out;
 
        /* mark that this skb passed once through the udp gro layer */
        NAPI_GRO_CB(skb)->udp_mark = 1;
-
-       off  = skb_gro_offset(skb);
-       hlen = off + sizeof(*uh);
-       uh   = skb_gro_header_fast(skb, off);
-       if (skb_gro_header_hard(skb, hlen)) {
-               uh = skb_gro_header_slow(skb, hlen, off);
-               if (unlikely(!uh))
-                       goto out;
-       }
+       NAPI_GRO_CB(skb)->encapsulation++;
 
        rcu_read_lock();
        uo_priv = rcu_dereference(udp_offload_base);
                        continue;
 
                uh2 = (struct udphdr   *)(p->data + off);
-               if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
+
+               /* Match ports, and require that the checksums be either
+                * both zero or both nonzero: resegmentation must treat
+                * every segment of the merged packet the same way.
+                */
+               if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
+                   (!uh->check ^ !uh2->check)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        return pp;
 }
 
-static int udp_gro_complete(struct sk_buff *skb, int nhoff)
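+/* GRO receive for plain UDPv4: validate the checksum against the IPv4
+ * pseudo-header (a zero checksum means "no checksum" and is accepted),
+ * flushing on failure, then hand off to the protocol-agnostic
+ * udp_gro_receive().
+ */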
+static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
+                                        struct sk_buff *skb)
+{
+       struct udphdr *uh = udp_gro_udphdr(skb);
+
+       /* Don't bother verifying checksum if we're going to flush anyway. */
+       if (unlikely(!uh) ||
+           (!NAPI_GRO_CB(skb)->flush &&
+            skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
+                                                 inet_gro_compute_pseudo))) {
+               NAPI_GRO_CB(skb)->flush = 1;
+               return NULL;
+       }
+
+       return udp_gro_receive(head, skb, uh);
+}
+
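+/* Shared GRO completion for UDP: update the UDP length of the merged
+ * super-packet, then invoke the gro_complete() callback of the
+ * udp_offload registered for the destination port, if any.
+ */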
+int udp_gro_complete(struct sk_buff *skb, int nhoff)
 {
        struct udp_offload_priv *uo_priv;
        __be16 newlen = htons(skb->len - nhoff);
        return err;
 }
 
+static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
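+       /* Coalescing changed the UDP length, so reseed a nonzero
+        * checksum with the new pseudo-header checksum, exactly as for
+        * a CHECKSUM_PARTIAL packet, letting later segmentation derive
+        * per-segment checksums from it.  A zero checksum stays zero.
+        */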
+       if (uh->check)
+               uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
+                                         iph->daddr, 0);
+
+       return udp_gro_complete(skb, nhoff);
+}
+
 static const struct net_offload udpv4_offload = {
        .callbacks = {
                .gso_send_check = udp4_ufo_send_check,
                .gso_segment = udp4_ufo_fragment,
-               .gro_receive  = udp_gro_receive,
-               .gro_complete = udp_gro_complete,
+               .gro_receive  = udp4_gro_receive,
+               .gro_complete = udp4_gro_complete,
        },
 };
 
 
  *      UDPv6 GSO support
  */
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <net/protocol.h>
 #include <net/ipv6.h>
 #include <net/udp.h>
 out:
        return segs;
 }
+
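+/* GRO receive for UDPv6: as udp4_gro_receive(), but validate against
+ * the IPv6 pseudo-header.  skb_gro_checksum_validate_zero_check()
+ * accepts a zero checksum here too; enforcing the IPv6 prohibition on
+ * zero UDP checksums is left to the regular receive path.
+ */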
+static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
+                                        struct sk_buff *skb)
+{
+       struct udphdr *uh = udp_gro_udphdr(skb);
+
+       /* Don't bother verifying checksum if we're going to flush anyway. */
+       if (unlikely(!uh) ||
+           (!NAPI_GRO_CB(skb)->flush &&
+            skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
+                                                 ip6_gro_compute_pseudo))) {
+               NAPI_GRO_CB(skb)->flush = 1;
+               return NULL;
+       }
+
+       return udp_gro_receive(head, skb, uh);
+}
+
+static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
+{
+       const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
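+       /* As in udp4_gro_complete(): reseed a nonzero checksum with the
+        * pseudo-header checksum for the coalesced length.
+        */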
+       if (uh->check)
+               uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
+                                         &ipv6h->daddr, 0);
+
+       return udp_gro_complete(skb, nhoff);
+}
+
 static const struct net_offload udpv6_offload = {
        .callbacks = {
                .gso_send_check =       udp6_ufo_send_check,
                .gso_segment    =       udp6_ufo_fragment,
+               .gro_receive    =       udp6_gro_receive,
+               .gro_complete   =       udp6_gro_complete,
        },
 };
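
For anyone exercising these hooks out of tree: udp_gro_receive() only
coalesces flows whose destination port has a struct udp_offload on
udp_offload_base, so an encapsulation driver opts in through
udp_add_offload()/udp_del_offload().  A minimal sketch of that
registration follows, assuming this era's struct udp_offload (a __be16
port plus struct offload_callbacks); the my_encap_* names are
hypothetical stubs, not part of this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/protocol.h>

/* Stub GRO callbacks: a real driver would match or flush based on its
 * own encapsulation header.  This stub simply refuses to coalesce.
 */
static struct sk_buff **my_encap_gro_receive(struct sk_buff **head,
                                             struct sk_buff *skb)
{
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}

static int my_encap_gro_complete(struct sk_buff *skb, int nhoff)
{
        return 0;
}

static struct udp_offload my_encap_offload = {
        .callbacks = {
                .gro_receive    = my_encap_gro_receive,
                .gro_complete   = my_encap_gro_complete,
        },
};

/* Register for the tunnel's UDP port; returns 0 on success. */
static int my_encap_offload_init(__be16 port)
{
        my_encap_offload.port = port;
        return udp_add_offload(&my_encap_offload);
}

static void my_encap_offload_exit(void)
{
        udp_del_offload(&my_encap_offload);
}

Registration would typically happen when the tunnel socket is set up,
mirroring how vxlan wires up its udp_offloads.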