min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
        err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err)) {
-               kfree_skb(skb);
+       if (unlikely(err))
                goto free_rt;
-       }
 
-       skb = udp_tunnel_handle_offloads(skb, udp_sum);
-       if (IS_ERR(skb)) {
-               err = PTR_ERR(skb);
+       err = udp_tunnel_handle_offloads(skb, udp_sum);
+       if (err)
                goto free_rt;
-       }
 
        gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
        geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
                        + GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
        err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err)) {
-               kfree_skb(skb);
+       if (unlikely(err))
                goto free_dst;
-       }
 
-       skb = udp_tunnel_handle_offloads(skb, udp_sum);
-       if (IS_ERR(skb)) {
-               err = PTR_ERR(skb);
+       err = udp_tunnel_handle_offloads(skb, udp_sum);
+       if (err)
                goto free_dst;
-       }
 
        gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
        geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
                err = geneve_build_skb(rt, skb, key->tun_flags, vni,
                                       info->options_len, opts, flags, xnet);
                if (unlikely(err))
-                       goto err;
+                       goto tx_error;
 
                tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
                ttl = key->ttl;
                err = geneve_build_skb(rt, skb, 0, geneve->vni,
                                       0, NULL, flags, xnet);
                if (unlikely(err))
-                       goto err;
+                       goto tx_error;
 
                tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
                ttl = geneve->ttl;
 
 tx_error:
        dev_kfree_skb(skb);
-err:
+
        if (err == -ELOOP)
                dev->stats.collisions++;
        else if (err == -ENETUNREACH)
                                        info->options_len, opts,
                                        flags, xnet);
                if (unlikely(err))
-                       goto err;
+                       goto tx_error;
 
                prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
                ttl = key->ttl;
                err = geneve6_build_skb(dst, skb, 0, geneve->vni,
                                        0, NULL, flags, xnet);
                if (unlikely(err))
-                       goto err;
+                       goto tx_error;
 
                prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
                                           iip, skb);
 
 tx_error:
        dev_kfree_skb(skb);
-err:
+
        if (err == -ELOOP)
                dev->stats.collisions++;
        else if (err == -ENETUNREACH)
 
        if (WARN_ON(!skb))
                return -ENOMEM;
 
-       skb = iptunnel_handle_offloads(skb, type);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       err = iptunnel_handle_offloads(skb, type);
+       if (err)
+               goto out_free;
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = VXLAN_HF_VNI;
 
 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
                                             gfp_t flags);
 
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
 
 static inline int iptunnel_pull_offloads(struct sk_buff *skb)
 {
 
                                    __be16 flags, __be64 tunnel_id,
                                    int md_size);
 
-static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
-                                                        bool udp_csum)
+static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
 {
        int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
 
 
        int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
                                                       SKB_GSO_UDP_TUNNEL;
        __be16 sport;
+       int err;
 
-       skb = iptunnel_handle_offloads(skb, type);
-
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       err = iptunnel_handle_offloads(skb, type);
+       if (err)
+               return err;
 
        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);
        __be16 sport;
        void *data;
        bool need_priv = false;
+       int err;
 
        if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
            skb->ip_summed == CHECKSUM_PARTIAL) {
 
        optlen += need_priv ? GUE_LEN_PRIV : 0;
 
-       skb = iptunnel_handle_offloads(skb, type);
-
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       err = iptunnel_handle_offloads(skb, type);
+       if (err)
+               return err;
 
        /* Get source port (based on flow hash) before skb_push */
        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
 
        ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
-static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
-                                          bool csum)
+static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
        return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
        }
 
        /* Push Tunnel header. */
-       skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
-       if (IS_ERR(skb)) {
-               skb = NULL;
+       if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
                goto err_free_rt;
-       }
 
        flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
        build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
                tnl_params = &tunnel->parms.iph;
        }
 
-       skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
-       if (IS_ERR(skb))
-               goto out;
+       if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+               goto free_skb;
 
        __gre_xmit(skb, dev, tnl_params, skb->protocol);
        return NETDEV_TX_OK;
 
 free_skb:
        kfree_skb(skb);
-out:
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
                return NETDEV_TX_OK;
        }
 
-       skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
-       if (IS_ERR(skb))
-               goto out;
+       if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+               goto free_skb;
 
        if (skb_cow_head(skb, dev->needed_headroom))
                goto free_skb;
 
 free_skb:
        kfree_skb(skb);
-out:
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
 
 }
 EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
 
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
-                                        int gso_type_mask)
+int iptunnel_handle_offloads(struct sk_buff *skb,
+                            int gso_type_mask)
 {
        int err;
 
        if (skb_is_gso(skb)) {
                err = skb_unclone(skb, GFP_ATOMIC);
                if (unlikely(err))
-                       goto error;
+                       return err;
                skb_shinfo(skb)->gso_type |= gso_type_mask;
-               return skb;
+               return 0;
        }
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                skb->encapsulation = 0;
        }
 
-       return skb;
-error:
-       kfree_skb(skb);
-       return ERR_PTR(err);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
 
 
        if (unlikely(skb->protocol != htons(ETH_P_IP)))
                goto tx_error;
 
-       skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
-       if (IS_ERR(skb))
-               goto out;
+       if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+               goto tx_error;
 
        skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
 
 tx_error:
        kfree_skb(skb);
-out:
+
        dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
                goto tx_error;
        }
 
-       skb = iptunnel_handle_offloads(skb, SKB_GSO_SIT);
-       if (IS_ERR(skb)) {
+       if (iptunnel_handle_offloads(skb, SKB_GSO_SIT)) {
                ip_rt_put(rt);
-               goto out;
+               goto tx_error;
        }
 
        if (df) {
        dst_link_failure(skb);
 tx_error:
        kfree_skb(skb);
-out:
        dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr  *tiph = &tunnel->parms.iph;
 
-       skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
-       if (IS_ERR(skb))
-               goto out;
+       if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+               goto tx_error;
 
        skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
        ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
        return NETDEV_TX_OK;
-out:
+tx_error:
+       kfree_skb(skb);
        dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
        if (IS_ERR(skb))
                goto tx_error;
 
-       skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af));
-       if (IS_ERR(skb))
+       if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)))
                goto tx_error;
 
        skb->transport_header = skb->network_header;
        if (IS_ERR(skb))
                goto tx_error;
 
-       skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af));
-       if (IS_ERR(skb))
+       if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)))
                goto tx_error;
 
        skb->transport_header = skb->network_header;