*/
        struct skb_mstamp first_sackt;
        struct skb_mstamp last_sackt;
+       struct skb_mstamp ack_time; /* Timestamp when the S/ACK was received */
        struct rate_sample *rate;
        int     flag;
 };
                return sacked;
 
        if (!(sacked & TCPCB_SACKED_ACKED)) {
-               tcp_rack_advance(tp, xmit_time, sacked);
+               tcp_rack_advance(tp, sacked, xmit_time, &state->ack_time);
 
                if (sacked & TCPCB_SACKED_RETRANS) {
                        /* If the segment is not tagged as lost,
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, const int acked,
-                                 bool is_dupack, int *ack_flag, int *rexmit)
+                                 bool is_dupack, int *ack_flag, int *rexmit,
+                                 const struct skb_mstamp *ack_time)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS) {
                u32 prior_retrans = tp->retrans_out;
 
-               tcp_rack_mark_lost(sk);
+               tcp_rack_mark_lost(sk, ack_time);
                if (prior_retrans > tp->retrans_out) {
                        flag |= FLAG_LOST_RETRANS;
                        *ack_flag |= FLAG_LOST_RETRANS;
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                               u32 prior_snd_una, int *acked,
-                              struct tcp_sacktag_state *sack,
-                              struct skb_mstamp *now)
+                              struct tcp_sacktag_state *sack)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct skb_mstamp first_ackt, last_ackt;
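+       /* "now" aliases the ACK arrival time stored in the sacktag state */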
+       struct skb_mstamp *now = &sack->ack_time;
        struct tcp_sock *tp = tcp_sk(sk);
        u32 prior_sacked = tp->sacked_out;
        u32 reord = tp->packets_out;
                } else if (tcp_is_sack(tp)) {
                        tp->delivered += acked_pcount;
                        if (!tcp_skb_spurious_retrans(tp, skb))
-                               tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
+                               tcp_rack_advance(tp, sacked,
+                                                &skb->skb_mstamp,
+                                                &sack->ack_time);
                }
                if (sacked & TCPCB_LOST)
                        tp->lost_out -= acked_pcount;
        u32 lost = tp->lost;
        int acked = 0; /* Number of packets newly acked */
        int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
-       struct skb_mstamp now;
 
        sack_state.first_sackt.v64 = 0;
        sack_state.rate = &rs;
        if (after(ack, tp->snd_nxt))
                goto invalid_ack;
 
-       skb_mstamp_get(&now);
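+       /* Arrival time of this ACK, shared via sack_state with the SACK,
+        * RACK and rate sampling code below.
+        */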
+       skb_mstamp_get(&sack_state.ack_time);
 
        if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
 
        /* See if we can take anything off of the retransmit queue. */
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
-                                   &sack_state, &now);
+                                   &sack_state);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit,
+                                     &sack_state.ack_time);
        }
        if (tp->tlp_high_seq)
                tcp_process_tlp_ack(sk, ack, flag);
                tcp_schedule_loss_probe(sk);
        delivered = tp->delivered - delivered;  /* freshly ACKed or SACKed */
        lost = tp->lost - lost;                 /* freshly marked lost */
-       tcp_rate_gen(sk, delivered, lost, &now, &rs);
-       tcp_cong_control(sk, ack, delivered, flag, &rs);
+       tcp_rate_gen(sk, delivered, lost, &sack_state.ack_time,
+                    sack_state.rate);
+       tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
        tcp_xmit_recovery(sk, rexmit);
        return 1;
 
 no_queue:
        /* If data was DSACKed, see if we can undo a cwnd reduction. */
        if (flag & FLAG_DSACKING_ACK)
-               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit,
+                                     &sack_state.ack_time);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
         * If data was DSACKed, see if we can undo a cwnd reduction.
         */
        if (TCP_SKB_CB(skb)->sacked) {
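+               /* Stamp the arrival time of this ACK for the SACK and
+                * RACK processing below.
+                */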
+               skb_mstamp_get(&sack_state.ack_time);
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
                                                &sack_state);
-               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit,
+                                     &sack_state.ack_time);
                tcp_xmit_recovery(sk, rexmit);
        }
 
 
  * The current version is only used after recovery starts but can be
  * easily extended to detect the first loss.
  */
-static void tcp_rack_detect_loss(struct sock *sk)
+static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
                        continue;
 
                if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
-
-                       if (skb_mstamp_us_delta(&tp->rack.mstamp,
-                                               &skb->skb_mstamp) <= reo_wnd)
-                               continue;
-
-                       /* skb is lost if packet sent later is sacked */
-                       tcp_rack_mark_skb_lost(sk, skb);
+                       /* Step 3 in draft-cheng-tcpm-rack-00.txt:
+                        * A packet is lost if its elapsed time is beyond
+                        * the recent RTT plus the reordering window.
+                        */
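+                       /* rack.rtt_us is the RTT of the most recently
+                        * (s)acked packet, recorded by tcp_rack_advance().
+                        */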
+                       if (skb_mstamp_us_delta(now, &skb->skb_mstamp) >
+                           tp->rack.rtt_us + reo_wnd) {
+                               tcp_rack_mark_skb_lost(sk, skb);
+                       }
                } else if (!(scb->sacked & TCPCB_RETRANS)) {
                        /* Original data are sent sequentially so stop early
                         * b/c the rest are all sent after rack_sent
        }
 }
 
-void tcp_rack_mark_lost(struct sock *sk)
+void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
                return;
        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
-       tcp_rack_detect_loss(sk);
+       tcp_rack_detect_loss(sk, now);
 }
 
-/* Record the most recently (re)sent time among the (s)acked packets */
-void tcp_rack_advance(struct tcp_sock *tp,
-                     const struct skb_mstamp *xmit_time, u8 sacked)
+/* Record the most recently (re)sent time among the (s)acked packets
+ * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
+ * draft-cheng-tcpm-rack-00.txt
+ */
+void tcp_rack_advance(struct tcp_sock *tp, u8 sacked,
+                     const struct skb_mstamp *xmit_time,
+                     const struct skb_mstamp *ack_time)
 {
+       u32 rtt_us;
+
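+       /* Only packets (re)sent after RACK.xmit_time can advance it */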
        if (tp->rack.mstamp.v64 &&
            !skb_mstamp_after(xmit_time, &tp->rack.mstamp))
                return;
 
+       rtt_us = skb_mstamp_us_delta(ack_time, xmit_time);
        if (sacked & TCPCB_RETRANS) {
-               struct skb_mstamp now;
-
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or the prior
                 * retransmission) was sacked.
                 * so it's at least one RTT (i.e., retransmission is at least
                 * an RTT later).
                 */
-               skb_mstamp_get(&now);
-               if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp))
+               if (rtt_us < tcp_min_rtt(tp))
                        return;
        }
-
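+       /* RTT of the most recently delivered packet, used by
+        * tcp_rack_detect_loss() as the base of the loss threshold.
+        */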
+       tp->rack.rtt_us = rtt_us;
        tp->rack.mstamp = *xmit_time;
        tp->rack.advanced = 1;
 }