int data_offset;
 
        /* This is non-zero if the packet cannot be merged with the new skb. */
-       int flush;
+       u16     flush;
+
+       /* Save the IP ID here and check when we get to the transport layer */
+       u16     flush_id;
 
        /* Number of segments aggregated. */
        u16     count;
        /* Used in ipv6_gro_receive() */
        int     proto;
 
+       /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+       __wsum  csum;
+
        /* used in skb_gro_receive() slow path */
        struct sk_buff *last;
 };
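The new flush_id field defers the outer IP ID check: in hunks not shown
in this excerpt, inet_gro_receive() stores the ID comparison instead of
OR-ing it into ->flush, and tcp_gro_receive() folds it into its flush
decision at the transport layer, so only the innermost IP header's ID is
enforced. Roughly (paraphrased, not verbatim from the patch):

	/* inet_gro_receive(): defer the ID delta check */
	NAPI_GRO_CB(p)->flush_id =
		((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);

	/* tcp_gro_receive(): only the innermost IP ID participates */
	flush |= NAPI_GRO_CB(p)->flush_id;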
               skb_network_offset(skb);
 }
 
+static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
+                                       const void *start, unsigned int len)
+{
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
+                                                 csum_partial(start, len, 0));
+}
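A tunnel gro_receive handler is expected to pair this helper with
skb_gro_pull() so that NAPI_GRO_CB(skb)->csum keeps covering exactly the
bytes that remain after its header, as gre_gro_receive() below does. A
minimal sketch (hypothetical handler; hlen is that protocol's header
length):

	const void *hdr = skb_gro_header_fast(skb, skb_gro_offset(skb));

	skb_gro_pull(skb, hlen);                /* advance past our header */
	skb_gro_postpull_rcsum(skb, hdr, hlen); /* subtract it from the csum */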
+
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
 gro_result_t napi_gro_frags(struct napi_struct *napi);
+struct packet_offload *gro_find_receive_by_type(__be16 type);
+struct packet_offload *gro_find_complete_by_type(__be16 type);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
 
 
        skb_gro_reset_offset(skb);
        gro_list_prepare(napi, skb);
+       NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
 
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
        goto pull;
 }
 
+struct packet_offload *gro_find_receive_by_type(__be16 type)
+{
+       struct list_head *offload_head = &offload_base;
+       struct packet_offload *ptype;
+
+       list_for_each_entry_rcu(ptype, offload_head, list) {
+               if (ptype->type != type || !ptype->callbacks.gro_receive)
+                       continue;
+               return ptype;
+       }
+       return NULL;
+}
+EXPORT_SYMBOL(gro_find_receive_by_type);
+
+struct packet_offload *gro_find_complete_by_type(__be16 type)
+{
+       struct list_head *offload_head = &offload_base;
+       struct packet_offload *ptype;
+
+       list_for_each_entry_rcu(ptype, offload_head, list) {
+               if (ptype->type != type || !ptype->callbacks.gro_complete)
+                       continue;
+               return ptype;
+       }
+       return NULL;
+}
+EXPORT_SYMBOL(gro_find_complete_by_type);
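Both lookups walk offload_base with list_for_each_entry_rcu(), so a
caller must hold rcu_read_lock() across the lookup and any use of the
returned entry, exactly as the GRE handlers below do:

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype)
		pp = ptype->callbacks.gro_receive(head, skb);
	rcu_read_unlock();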
 
 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
 
        return segs;
 }
 
+/* Compute the csum of the whole skb in software and store it, then
+ * verify the csum of the GRO'ed portion starting at the current GRO
+ * offset.
+ */
+static __sum16 gro_skb_checksum(struct sk_buff *skb)
+{
+       __sum16 sum;
+
+       skb->csum = skb_checksum(skb, 0, skb->len, 0);
+       NAPI_GRO_CB(skb)->csum = csum_sub(skb->csum,
+               csum_partial(skb->data, skb_gro_offset(skb), 0));
+       sum = csum_fold(NAPI_GRO_CB(skb)->csum);
+       if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) {
+               if (unlikely(!sum))
+                       netdev_rx_csum_fault(skb->dev);
+       } else
+               skb->ip_summed = CHECKSUM_COMPLETE;
+
+       return sum;
+}
+
+static struct sk_buff **gre_gro_receive(struct sk_buff **head,
+                                       struct sk_buff *skb)
+{
+       struct sk_buff **pp = NULL;
+       struct sk_buff *p;
+       const struct gre_base_hdr *greh;
+       unsigned int hlen, grehlen;
+       unsigned int off;
+       int flush = 1;
+       struct packet_offload *ptype;
+       __be16 type;
+
+       off = skb_gro_offset(skb);
+       hlen = off + sizeof(*greh);
+       greh = skb_gro_header_fast(skb, off);
+       if (skb_gro_header_hard(skb, hlen)) {
+               greh = skb_gro_header_slow(skb, hlen, off);
+               if (unlikely(!greh))
+                       goto out;
+       }
+
+       /* Only support version 0 and the K (key) and C (csum) flags.
+        * Note that although support for the S (seq#) flag could be
+        * added easily for GRO, it is problematic for GSO and hence
+        * cannot be enabled here, because a GRO'ed pkt may end up in
+        * the forwarding path and would then require GSO support to
+        * break it up correctly.
+        */
+       if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
+               goto out;
+
+       type = greh->protocol;
+
+       rcu_read_lock();
+       ptype = gro_find_receive_by_type(type);
+       if (ptype == NULL)
+               goto out_unlock;
+
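+       /* Per RFC 2784/2890 the optional fields (checksum + reserved,
+        * then key) each take one 4-byte GRE_HEADER_SECTION after the
+        * 4-byte base header, in that order.
+        */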
+       grehlen = GRE_HEADER_SECTION;
+
+       if (greh->flags & GRE_KEY)
+               grehlen += GRE_HEADER_SECTION;
+
+       if (greh->flags & GRE_CSUM)
+               grehlen += GRE_HEADER_SECTION;
+
+       hlen = off + grehlen;
+       if (skb_gro_header_hard(skb, hlen)) {
+               greh = skb_gro_header_slow(skb, hlen, off);
+               if (unlikely(!greh))
+                       goto out_unlock;
+       }
+       if (greh->flags & GRE_CSUM) { /* Need to verify GRE csum first */
+               __sum16 csum = 0;
+
+               if (skb->ip_summed == CHECKSUM_COMPLETE)
+                       csum = csum_fold(NAPI_GRO_CB(skb)->csum);
+               /* Don't trust a csum error calculated/reported by h/w */
+               if (skb->ip_summed == CHECKSUM_NONE || csum != 0)
+                       csum = gro_skb_checksum(skb);
+
+               /* GRE CSUM is the 1's complement of the 1's complement sum
+                * of the GRE hdr plus payload so it should add up to 0xffff
+                * (and 0 after csum_fold()) just like the IPv4 hdr csum.
+                */
+               if (csum)
+                       goto out_unlock;
+       }
+       flush = 0;
+
+       for (p = *head; p; p = p->next) {
+               const struct gre_base_hdr *greh2;
+
+               if (!NAPI_GRO_CB(p)->same_flow)
+                       continue;
+
+               /* The following checks are needed to ensure only pkts
+                * from the same tunnel are considered for aggregation.
+                * The criteria for "the same tunnel" include:
+                * 1) same version (we only support version 0 here)
+                * 2) same protocol (we only support ETH_P_IP for now)
+                * 3) same set of flags
+                * 4) same key if the key field is present.
+                */
+               greh2 = (struct gre_base_hdr *)(p->data + off);
+
+               if (greh2->flags != greh->flags ||
+                   greh2->protocol != greh->protocol) {
+                       NAPI_GRO_CB(p)->same_flow = 0;
+                       continue;
+               }
+               if (greh->flags & GRE_KEY) {
+                       /* compare keys */
+                       if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
+                               NAPI_GRO_CB(p)->same_flow = 0;
+                               continue;
+                       }
+               }
+       }
+
+       skb_gro_pull(skb, grehlen);
+
+       /* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
+       skb_gro_postpull_rcsum(skb, greh, grehlen);
+
+       pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+       rcu_read_unlock();
+out:
+       NAPI_GRO_CB(skb)->flush |= flush;
+
+       return pp;
+}
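The 0xffff property relied on by the GRE_CSUM check above is the usual
Internet-checksum identity: once the 1's-complement checksum is written
into the header, the 16-bit 1's-complement sum over the covered data
comes out as all ones, which csum_fold() turns into 0. A standalone
user-space sketch of the same arithmetic (plain C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* 16-bit 1's-complement sum, the arithmetic behind
	 * csum_partial()/csum_fold().
	 */
	static uint16_t csum16(const uint16_t *p, int nwords)
	{
		uint32_t sum = 0;

		while (nwords--)
			sum += *p++;
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		/* arbitrary payload; buf[1] is the checksum slot */
		uint16_t buf[4] = { 0x2000, 0x0000, 0x0800, 0xabcd };

		buf[1] = ~csum16(buf, 4);
		/* whole-buffer sum is now 0xffff, so folding yields 0 */
		printf("%#x\n", (unsigned)(uint16_t)~csum16(buf, 4));
		return 0;
	}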
+
+static int gre_gro_complete(struct sk_buff *skb, int nhoff)
+{
+       struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
+       struct packet_offload *ptype;
+       unsigned int grehlen = sizeof(*greh);
+       int err = -ENOENT;
+       __be16 type;
+
+       type = greh->protocol;
+       if (greh->flags & GRE_KEY)
+               grehlen += GRE_HEADER_SECTION;
+
+       if (greh->flags & GRE_CSUM)
+               grehlen += GRE_HEADER_SECTION;
+
+       rcu_read_lock();
+       ptype = gro_find_complete_by_type(type);
+       if (ptype != NULL)
+               err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
+
+       rcu_read_unlock();
+       return err;
+}
+
 static const struct net_offload gre_offload = {
        .callbacks = {
                .gso_send_check = gre_gso_send_check,
                .gso_segment = gre_gso_segment,
+               .gro_receive = gre_gro_receive,
+               .gro_complete = gre_gro_complete,
        },
 };
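Not visible in this hunk: gre_offload is a struct net_offload keyed by
IP protocol number, so elsewhere in net/ipv4/gre_offload.c it is
registered for IPPROTO_GRE along these lines:

	static int __init gre_offload_init(void)
	{
		return inet_add_offload(&gre_offload, IPPROTO_GRE);
	}
	device_initcall(gre_offload_init);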