#include <linux/list.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 
 /* Version Information */
 #define DRIVER_VERSION "v1.06.0 (2014/03/03)"
        __le32 opts2;
 #define RD_UDP_CS                      (1 << 23)
 #define RD_TCP_CS                      (1 << 22)
+#define RD_IPV6_CS                     (1 << 20)
 #define RD_IPV4_CS                     (1 << 19)
 
        __le32 opts3;
 #define TX_FS                  (1 << 31) /* First segment of a packet */
 #define TX_LS                  (1 << 30) /* Final segment of a packet */
 #define GTSENDV4               (1 << 28)
+#define GTSENDV6               (1 << 27)
 #define GTTCPHO_SHIFT          18
+#define GTTCPHO_MAX            0x7fU
 #define TX_LEN_MAX             0x3ffffU
 
        __le32 opts2;
 #define MSS_SHIFT              17
 #define MSS_MAX                        0x7ffU
 #define TCPHO_SHIFT            17
+#define TCPHO_MAX              0x7ffU
 };
 
 struct r8152;
        return protocol;
 }
 
+/*
+ * r8152_csum_workaround()
+ * The hw limits the value of the transport offset. When the offset is out of
+ * range, calculate the checksum by sw.
+ */
+static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
+                                 struct sk_buff_head *list)
+{
+       if (skb_shinfo(skb)->gso_size) {
+               netdev_features_t features = tp->netdev->features;
+               struct sk_buff_head seg_list;
+               struct sk_buff *segs, *nskb;
+
+               /* Segment the GSO packet in software.  All relevant offload
+                * bits must be masked out here: if NETIF_F_TSO6 or
+                * NETIF_F_IPV6_CSUM were left set, skb_gso_segment() would
+                * return NULL ("segmentation not needed") for IPv6 GSO skbs
+                * and the packet would be dropped below instead of sent.
+                */
+               features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO |
+                             NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+               segs = skb_gso_segment(skb, features);
+               if (IS_ERR(segs) || !segs)
+                       goto drop;
+
+               __skb_queue_head_init(&seg_list);
+
+               /* skb_gso_segment() returns a singly linked chain; unlink
+                * each segment and move it onto the caller's queue.
+                */
+               do {
+                       nskb = segs;
+                       segs = segs->next;
+                       nskb->next = NULL;
+                       __skb_queue_tail(&seg_list, nskb);
+               } while (segs);
+
+               skb_queue_splice(&seg_list, list);
+               dev_kfree_skb(skb);
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               /* Not GSO: compute the checksum in software and requeue the
+                * skb at the head so it is retried first.
+                */
+               if (skb_checksum_help(skb) < 0)
+                       goto drop;
+
+               __skb_queue_head(list, skb);
+       } else {
+               struct net_device_stats *stats;
+
+drop:
+               /* Nothing more we can do: account the drop and free the skb. */
+               stats = &tp->netdev->stats;
+               stats->tx_dropped++;
+               dev_kfree_skb(skb);
+       }
+}
+
+/*
+ * msdn_giant_send_check()
+ * According to Microsoft's documentation, the TCP pseudo header excludes the
+ * packet length for IPv6 TCP large packets.
+ */
+static int msdn_giant_send_check(struct sk_buff *skb)
+{
+       const struct ipv6hdr *ipv6h;
+       struct tcphdr *th;
+
+       ipv6h = ipv6_hdr(skb);
+       th = tcp_hdr(skb);
+
+       /* Seed th->check with the complemented IPv6 pseudo-header checksum,
+        * computed with a zero length field (the hw inserts the rest), as
+        * required for large-send offload.
+        *
+        * NOTE(review): this writes into the packet header without calling
+        * skb_cow_head() first — assumes the header area is writable/private
+        * at this point; confirm against the tx path.
+        */
+       th->check = 0;
+       th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+
+       /* Always succeeds in the current implementation. */
+       return 0;
+}
+
 static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
                         struct sk_buff *skb, u32 len, u32 transport_offset)
 {
        opts1 = len | TX_FS | TX_LS;
 
        if (mss) {
+               if (transport_offset > GTTCPHO_MAX) {
+                       netif_warn(tp, tx_err, tp->netdev,
+                                  "Invalid transport offset 0x%x for TSO\n",
+                                  transport_offset);
+                       ret = TX_CSUM_TSO;
+                       goto unavailable;
+               }
+
                switch (get_protocol(skb)) {
                case htons(ETH_P_IP):
                        opts1 |= GTSENDV4;
                        break;
 
+               case htons(ETH_P_IPV6):
+                       opts1 |= GTSENDV6;
+                       msdn_giant_send_check(skb);
+                       break;
+
                default:
                        WARN_ON_ONCE(1);
                        break;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 ip_protocol;
 
+               if (transport_offset > TCPHO_MAX) {
+                       netif_warn(tp, tx_err, tp->netdev,
+                                  "Invalid transport offset 0x%x\n",
+                                  transport_offset);
+                       ret = TX_CSUM_NONE;
+                       goto unavailable;
+               }
+
                switch (get_protocol(skb)) {
                case htons(ETH_P_IP):
                        opts2 |= IPV4_CS;
        desc->opts2 = cpu_to_le32(opts2);
        desc->opts1 = cpu_to_le32(opts1);
 
+unavailable:
        return ret;
 }
 
 
                offset = (u32)skb_transport_offset(skb);
 
-               r8152_tx_csum(tp, tx_desc, skb, skb->len, offset);
+               if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
+                       r8152_csum_workaround(tp, skb, &skb_head);
+                       continue;
+               }
 
                tx_data += sizeof(*tx_desc);
 
                        checksum = CHECKSUM_NONE;
                else
                        checksum = CHECKSUM_UNNECESSARY;
+       } else if (opts2 & RD_IPV6_CS) {
+               /* Was "else if (RD_IPV6_CS)": testing the constant (1 << 20)
+                * is always true, so the branch also claimed non-IPv6 frames.
+                * The descriptor bit must be masked out of opts2.
+                *
+                * IPv6 has no header checksum; trust the frame when the hw
+                * validated the L4 (UDP/TCP) checksum without failure.
+                */
+               if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
+                       checksum = CHECKSUM_UNNECESSARY;
+               else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
+                       checksum = CHECKSUM_UNNECESSARY;
        }
 
 return_result:
        netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
 
        netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
-                           NETIF_F_TSO | NETIF_F_FRAGLIST;
+                           NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM |
+                           NETIF_F_TSO6;
        netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
-                             NETIF_F_TSO | NETIF_F_FRAGLIST;
+                             NETIF_F_TSO | NETIF_F_FRAGLIST |
+                             NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
 
        SET_ETHTOOL_OPS(netdev, &ops);
        netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);