*     @nf_trace: netfilter packet trace flag
  *     @protocol: Packet protocol from driver
  *     @destructor: Destruct function
+ *     @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
  *     @_nfct: Associated connection, if any (with nfctinfo bits)
  *     @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
  *     @skb_iif: ifindex of device we arrived on
         */
        char                    cb[48] __aligned(8);
 
-       unsigned long           _skb_refdst;
-       void                    (*destructor)(struct sk_buff *skb);
+       union {
+               struct {
+                       unsigned long   _skb_refdst;
+                       void            (*destructor)(struct sk_buff *skb);
+               };
+               struct list_head        tcp_tsorted_anchor;
+       };
+
 #ifdef CONFIG_XFRM
        struct  sec_path        *sp;
 #endif
 
        u32     tsoffset;       /* timestamp offset */
 
        struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
+       struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
 
        u32     snd_wl1;        /* Sequence for window update           */
        u32     snd_wnd;        /* The window we expect to receive      */
 
 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
 
+/* This helper is needed, because skb->tcp_tsorted_anchor shares
+ * its memory storage with skb->destructor/_skb_refdst (they sit in
+ * the same union in struct sk_buff).  Reset both fields so the skb
+ * is back in a state the generic networking code can free or reuse
+ * without misinterpreting the list anchor bytes.
+ */
+static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
+{
+       skb->destructor = NULL;
+       skb->_skb_refdst = 0UL;
+}
+
+/* tcp_skb_tsorted_save()/tcp_skb_tsorted_restore() must always be used
+ * as a matched pair: _save opens a C block, stashes skb->_skb_refdst in
+ * a local and clears it; _restore writes the saved value back and closes
+ * the block.  Callers wrap skb clone/copy operations with this pair,
+ * presumably so those helpers never see the tcp_tsorted_anchor bytes as
+ * a dst reference (shared union storage) -- see callers for usage shape.
+ */
+#define tcp_skb_tsorted_save(skb) {            \
+       unsigned long _save = skb->_skb_refdst; \
+       skb->_skb_refdst = 0UL;
+
+#define tcp_skb_tsorted_restore(skb)           \
+       skb->_skb_refdst = _save;               \
+}
+
 /* write queue abstraction */
+/* Free every skb still sitting in the write queue, cleaning up each
+ * skb's tsorted anchor first (it aliases destructor/_skb_refdst), and
+ * reset the now-empty time-sorted sent list.
+ */
 static inline void tcp_write_queue_purge(struct sock *sk)
 {
        struct sk_buff *skb;
 
        tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
-       while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
+       while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
+               /* restore destructor/_skb_refdst before freeing */
+               tcp_skb_tsorted_anchor_cleanup(skb);
                sk_wmem_free_skb(sk, skb);
+       }
+       /* all sent skbs are gone, so the time-sorted list starts over */
+       INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
        sk_mem_reclaim(sk);
        tcp_clear_all_retrans_hints(tcp_sk(sk));
 }
 
 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
 {
+       /* Drop skb from the time-sorted list and reset the anchor's
+        * shared storage (destructor/_skb_refdst) before unlinking it
+        * from the write queue.
+        */
+       list_del(&skb->tcp_tsorted_anchor);
+       tcp_skb_tsorted_anchor_cleanup(skb);
        __skb_unlink(skb, &sk->sk_write_queue);
 }
 
 
        tp->out_of_order_queue = RB_ROOT;
        tcp_init_xmit_timers(sk);
        INIT_LIST_HEAD(&tp->tsq_node);
+       INIT_LIST_HEAD(&tp->tsorted_sent_queue);
 
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
                         * available to the caller, no more, no less.
                         */
                        skb->reserved_tailroom = skb->end - skb->tail - size;
+                       INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
                        return skb;
                }
                __kfree_skb(skb);
 
                                                tcp_skb_pcount(skb),
                                                skb->skb_mstamp);
                        tcp_rate_skb_delivered(sk, skb, state->rate);
+                       if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
+                               list_del_init(&skb->tcp_tsorted_anchor);
 
                        if (!before(TCP_SKB_CB(skb)->seq,
                                    tcp_highest_sack_seq(tp)))
 
        shinfo = skb_shinfo(skb);
        if (!before(shinfo->tskey, prior_snd_una) &&
-           before(shinfo->tskey, tcp_sk(sk)->snd_una))
-               __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
+           before(shinfo->tskey, tcp_sk(sk)->snd_una)) {
+               tcp_skb_tsorted_save(skb) {
+                       __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
+               } tcp_skb_tsorted_restore(skb);
+       }
 }
 
 /* Remove acknowledged frames from the retransmission queue. If our packet
 
                newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
 
                INIT_LIST_HEAD(&newtp->tsq_node);
+               INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
 
                tcp_init_wl(newtp, treq->rcv_isn);
 
 
                      HRTIMER_MODE_ABS_PINNED);
 }
 
+/* Mark @skb as just (re)transmitted: stamp it with the current TCP
+ * clock and move it to the tail of tp->tsorted_sent_queue.  Since
+ * every send path moves the skb to the tail with a fresh stamp, the
+ * list stays sorted by last-transmit time.
+ */
+static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
+{
+       skb->skb_mstamp = tp->tcp_mstamp;
+       list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
+}
+
 /* This routine actually transmits TCP packets queued in by
  * tcp_do_sendmsg().  This is used by both the initial
  * transmission and possible later retransmissions.
                TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
                        - tp->snd_una;
                oskb = skb;
-               if (unlikely(skb_cloned(skb)))
-                       skb = pskb_copy(skb, gfp_mask);
-               else
-                       skb = skb_clone(skb, gfp_mask);
+
+               tcp_skb_tsorted_save(oskb) {
+                       if (unlikely(skb_cloned(oskb)))
+                               skb = pskb_copy(oskb, gfp_mask);
+                       else
+                               skb = skb_clone(oskb, gfp_mask);
+               } tcp_skb_tsorted_restore(oskb);
+
                if (unlikely(!skb))
                        return -ENOBUFS;
        }
                err = net_xmit_eval(err);
        }
        if (!err && oskb) {
-               oskb->skb_mstamp = tp->tcp_mstamp;
+               tcp_update_skb_after_send(tp, oskb);
                tcp_rate_skb_sent(sk, oskb);
        }
        return err;
        /* Link BUFF into the send queue. */
        __skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);
+       list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
 
        return 0;
 }
 
                if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
                        /* "skb_mstamp" is used as a start point for the retransmit timer */
-                       skb->skb_mstamp = tp->tcp_mstamp;
+                       tcp_update_skb_after_send(tp, skb);
                        goto repair; /* Skip network transmission */
                }
 
                     skb_headroom(skb) >= 0xFFFF)) {
                struct sk_buff *nskb;
 
-               nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
-               err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-                            -ENOBUFS;
+               tcp_skb_tsorted_save(skb) {
+                       nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
+                       err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+                                    -ENOBUFS;
+               } tcp_skb_tsorted_restore(skb);
+
                if (!err)
-                       skb->skb_mstamp = tp->tcp_mstamp;
+                       tcp_update_skb_after_send(tp, skb);
        } else {
                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
        }
                                goto coalesce;
                        return;
                }
+               INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
                skb_reserve(skb, MAX_TCP_HEADER);
                sk_forced_mem_schedule(sk, skb->truesize);
                /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
        }
        if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
                if (skb_cloned(skb)) {
-                       struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+                       struct sk_buff *nskb;
+
+                       tcp_skb_tsorted_save(skb) {
+                               nskb = skb_copy(skb, GFP_ATOMIC);
+                       } tcp_skb_tsorted_restore(skb);
                        if (!nskb)
                                return -ENOMEM;
+                       INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
                        tcp_unlink_write_queue(skb, sk);
                        __skb_header_release(nskb);
                        __tcp_add_write_queue_head(sk, nskb);