 static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb;
+       struct sk_buff *skb, *n;
        u32 reo_wnd;
 
        *reo_timeout = 0;
        if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
                reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
 
-       tcp_for_write_queue(skb, sk) {
+       list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
+                                tcp_tsorted_anchor) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
 
-               if (skb == tcp_send_head(sk))
-                       break;
-
-               /* Skip ones already (s)acked */
-               if (!after(scb->end_seq, tp->snd_una) ||
-                   scb->sacked & TCPCB_SACKED_ACKED)
-                       continue;
-
                if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
                                        tp->rack.end_seq, scb->end_seq)) {
                        /* Step 3 in draft-cheng-tcpm-rack-00.txt:
                         * A packet is lost if its elapsed time is beyond
                         * the recent RTT plus the reordering window.
                         */
                        u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
                                                         skb->skb_mstamp);
                        s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;

                        if (remaining < 0) {
                                tcp_rack_mark_skb_lost(sk, skb);
+                               list_del_init(&skb->tcp_tsorted_anchor);
                                continue;
                        }
 
 
                        /* Record maximum wait time (+1 to avoid 0) */
                        *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
-
-               } else if (!(scb->sacked & TCPCB_RETRANS)) {
-                       /* Original data are sent sequentially so stop early
-                        * b/c the rest are all sent after rack_sent
-                        */
+               } else {
                        break;
                }
        }
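
The heart of the change above is that RACK now walks tp->tsorted_sent_queue, a list kept in transmission-time order, and unlinks an skb with list_del_init() the moment it is marked lost, so later scans never revisit it. The fragment below is a minimal sketch of that iterate-and-unlink pattern using the generic <linux/list.h> helpers; struct rack_skb, its fields, and detect_lost() are made-up names for illustration, not kernel API.

#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical stand-in for an skb carrying its send timestamp. */
struct rack_skb {
	struct list_head tsorted_anchor;	/* links into the time-sorted list */
	u64 sent_us;				/* transmission time in microseconds */
	bool lost;
};

/* Walk a list ordered oldest-first by send time and unlink entries that
 * have timed out.  The _safe variant keeps a lookahead cursor 'n', which
 * is what makes list_del_init() legal while the walk is in progress.
 */
static void detect_lost(struct list_head *tsorted, u64 now_us, u64 timeout_us)
{
	struct rack_skb *e, *n;

	list_for_each_entry_safe(e, n, tsorted, tsorted_anchor) {
		if (now_us - e->sent_us < timeout_us)
			break;	/* list is time-sorted: everything after is newer */
		e->lost = true;
		list_del_init(&e->tsorted_anchor);	/* never rescan this entry */
	}
}

This also mirrors why the diff can drop the tcp_send_head() and already-(s)acked guards that the old write-queue walk needed: the time-sorted list is meant to hold only sent-but-unacked skbs in the first place.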