NETIF_F_UFO_BIT,                /* ... UDPv4 fragmentation */
        NETIF_F_GSO_ROBUST_BIT,         /* ... ->SKB_GSO_DODGY */
        NETIF_F_TSO_ECN_BIT,            /* ... TCP ECN support */
+       NETIF_F_TSO_MANGLEID_BIT,       /* ... IPV4 ID mangling allowed */
        NETIF_F_TSO6_BIT,               /* ... TCPv6 segmentation */
        NETIF_F_FSO_BIT,                /* ... FCoE segmentation */
        NETIF_F_GSO_GRE_BIT,            /* ... GRE with TSO */
 #define NETIF_F_GSO_SIT                __NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
 #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
+#define NETIF_F_TSO_MANGLEID   __NETIF_F(TSO_MANGLEID)
 #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX        __NETIF_F(HW_VLAN_STAG_RX)
 
 /* List of features with software fallbacks. */
 #define NETIF_F_GSO_SOFTWARE   (NETIF_F_TSO | NETIF_F_TSO_ECN | \
+                                NETIF_F_TSO_MANGLEID | \
                                 NETIF_F_TSO6 | NETIF_F_UFO)
 
 /* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be
 
        BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
 
        /* This indicates the tcp segment has CWR set. */
        SKB_GSO_TCP_ECN = 1 << 3,
 
-       SKB_GSO_TCPV6 = 1 << 4,
+       SKB_GSO_TCP_FIXEDID = 1 << 4,
 
-       SKB_GSO_FCOE = 1 << 5,
+       SKB_GSO_TCPV6 = 1 << 5,
 
-       SKB_GSO_GRE = 1 << 6,
+       SKB_GSO_FCOE = 1 << 6,
 
-       SKB_GSO_GRE_CSUM = 1 << 7,
+       SKB_GSO_GRE = 1 << 7,
 
-       SKB_GSO_IPIP = 1 << 8,
+       SKB_GSO_GRE_CSUM = 1 << 8,
 
-       SKB_GSO_SIT = 1 << 9,
+       SKB_GSO_IPIP = 1 << 9,
 
-       SKB_GSO_UDP_TUNNEL = 1 << 10,
+       SKB_GSO_SIT = 1 << 10,
 
-       SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
+       SKB_GSO_UDP_TUNNEL = 1 << 11,
 
-       SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
+       SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
+
+       SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
 };
 
 #if BITS_PER_LONG > 32
 
        return vlan_features_check(skb, features);
 }
 
+/* gso_features_check - validate GSO features for a specific skb
+ * @skb: the GSO skb about to be transmitted
+ * @dev: the device the skb will go out on
+ * @features: the candidate feature set
+ *
+ * Returns @features with any GSO features removed that cannot safely be
+ * applied to this particular skb.
+ */
+static netdev_features_t gso_features_check(const struct sk_buff *skb,
+                                           struct net_device *dev,
+                                           netdev_features_t features)
+{
+       u16 gso_segs = skb_shinfo(skb)->gso_segs;
+
+       /* If the skb would segment into more pieces than the device
+        * supports, strip all GSO features so it is segmented in software.
+        */
+       if (gso_segs > dev->gso_max_segs)
+               return features & ~NETIF_F_GSO_MASK;
+
+       /* Make sure to clear the IPv4 ID mangling feature if
+        * the IPv4 header has the potential to be fragmented.
+        */
+       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+               struct iphdr *iph = skb->encapsulation ?
+                                   inner_ip_hdr(skb) : ip_hdr(skb);
+
+               /* DF clear: the packet may be fragmented in flight, so the
+                * IP ID must remain meaningful and must not be mangled.
+                */
+               if (!(iph->frag_off & htons(IP_DF)))
+                       features &= ~NETIF_F_TSO_MANGLEID;
+       }
+
+       return features;
+}
+
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
        netdev_features_t features = dev->features;
-       u16 gso_segs = skb_shinfo(skb)->gso_segs;
 
-       if (gso_segs > dev->gso_max_segs)
-               features &= ~NETIF_F_GSO_MASK;
+       if (skb_is_gso(skb))
+               features = gso_features_check(skb, dev, features);
 
        /* If encapsulation offload request, verify we are testing
         * hardware encapsulation features instead of standard
        dev->features |= NETIF_F_SOFT_FEATURES;
        dev->wanted_features = dev->features & dev->hw_features;
 
-       if (!(dev->flags & IFF_LOOPBACK)) {
+       if (!(dev->flags & IFF_LOOPBACK))
                dev->hw_features |= NETIF_F_NOCACHE_COPY;
-       }
+
+       if (dev->hw_features & NETIF_F_TSO)
+               dev->hw_features |= NETIF_F_TSO_MANGLEID;
 
        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
         */
 
        [NETIF_F_UFO_BIT] =              "tx-udp-fragmentation",
        [NETIF_F_GSO_ROBUST_BIT] =       "tx-gso-robust",
        [NETIF_F_TSO_ECN_BIT] =          "tx-tcp-ecn-segmentation",
+       [NETIF_F_TSO_MANGLEID_BIT] =     "tx-tcp-mangleid-segmentation",
        [NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
        [NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
        [NETIF_F_GSO_GRE_BIT] =          "tx-gre-segmentation",
 
 static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
 {
+       bool udpfrag = false, fixedid = false, encap;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        unsigned int offset = 0;
-       bool udpfrag, encap;
        struct iphdr *iph;
        int proto;
        int nhoff;
                       SKB_GSO_TCPV6 |
                       SKB_GSO_UDP_TUNNEL |
                       SKB_GSO_UDP_TUNNEL_CSUM |
+                      SKB_GSO_TCP_FIXEDID |
                       SKB_GSO_TUNNEL_REMCSUM |
                       0)))
                goto out;
 
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
-       if (skb->encapsulation &&
-           skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
-               udpfrag = proto == IPPROTO_UDP && encap;
-       else
-               udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
+       if (!skb->encapsulation || encap) {
+               udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
+               fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
+
+               /* fixed ID is invalid if DF bit is not set */
+               if (fixedid && !(iph->frag_off & htons(IP_DF)))
+                       goto out;
+       }
 
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
        do {
                iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
                if (udpfrag) {
-                       iph->id = htons(id);
                        iph->frag_off = htons(offset >> 3);
                        if (skb->next)
                                iph->frag_off |= htons(IP_MF);
                        offset += skb->len - nhoff - ihl;
-               } else {
+               } else if (!fixedid) {
                        iph->id = htons(id++);
                }
                iph->tot_len = htons(skb->len - nhoff);
 
                                  SKB_GSO_UDP |
                                  SKB_GSO_DODGY |
                                  SKB_GSO_TCP_ECN |
+                                 SKB_GSO_TCP_FIXEDID |
                                  SKB_GSO_GRE |
                                  SKB_GSO_GRE_CSUM |
                                  SKB_GSO_IPIP |
 
                             ~(SKB_GSO_TCPV4 |
                               SKB_GSO_DODGY |
                               SKB_GSO_TCP_ECN |
+                              SKB_GSO_TCP_FIXEDID |
                               SKB_GSO_TCPV6 |
                               SKB_GSO_GRE |
                               SKB_GSO_GRE_CSUM |
                               SKB_GSO_UDP_TUNNEL_CSUM |
                               SKB_GSO_TUNNEL_REMCSUM |
                               0) ||
-                            !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
+                            !(type & (SKB_GSO_TCPV4 |
+                                      SKB_GSO_TCPV6))))
                        goto out;
 
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
                       SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
+                      SKB_GSO_TCP_FIXEDID |
+                      SKB_GSO_TCPV6 |
                       SKB_GSO_GRE |
                       SKB_GSO_GRE_CSUM |
                       SKB_GSO_IPIP |
                       SKB_GSO_UDP_TUNNEL |
                       SKB_GSO_UDP_TUNNEL_CSUM |
                       SKB_GSO_TUNNEL_REMCSUM |
-                      SKB_GSO_TCPV6 |
                       0)))
                goto out;
 
 
                                  SKB_GSO_TCPV6 |
                                  SKB_GSO_UDP |
                                  SKB_GSO_DODGY |
+                                 SKB_GSO_TCP_FIXEDID |
                                  SKB_GSO_TCP_ECN)))
                goto out;