return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 }
 
+/* provide the skb's departure time, in usec units */
+static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
+{
+       return skb->skb_mstamp;
+}
+
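The helper is a pure accessor today, but it gives every reader of the skb
departure time a single choke point. A minimal sketch of the payoff, assuming
a hypothetical later switch of the field to a nanosecond clock (the field name
skb_mstamp_ns and the conversion below are illustrative, not part of this
patch):

	/* Hypothetical follow-up: only the helper body changes,
	 * callers keep compiling unchanged.
	 */
	static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
	{
		return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
	}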
 
 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 
 static inline s64 tcp_rto_delta_us(const struct sock *sk)
 {
        const struct sk_buff *skb = tcp_rtx_queue_head(sk);
        u32 rto = inet_csk(sk)->icsk_rto;
-       u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+       u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
 
        return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
 }
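For intuition, tcp_rto_delta_us() returns how much of the RTO budget is left
for the head of the retransmit queue; the numbers below are assumed for
illustration only:

	/* tcp_skb_timestamp_us(head) = 1000000 us  (head sent at t = 1 s)
	 * jiffies_to_usecs(rto)      =  200000 us  (RTO of 200 ms)
	 * tp->tcp_mstamp             = 1030000 us  (now, 30 ms later)
	 * => 1200000 - 1030000 = 170000 us remain; a negative result
	 *    means the RTO deadline has already passed.
	 */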
 
         */
        tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
                        start_seq, end_seq, dup_sack, pcount,
-                       skb->skb_mstamp);
+                       tcp_skb_timestamp_us(skb));
        tcp_rate_skb_delivered(sk, skb, state->rate);
 
        if (skb == tp->lost_skb_hint)
                                                TCP_SKB_CB(skb)->end_seq,
                                                dup_sack,
                                                tcp_skb_pcount(skb),
-                                               skb->skb_mstamp);
+                                               tcp_skb_timestamp_us(skb));
                        tcp_rate_skb_delivered(sk, skb, state->rate);
                        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
                                list_del_init(&skb->tcp_tsorted_anchor);
                                tp->retrans_out -= acked_pcount;
                        flag |= FLAG_RETRANS_DATA_ACKED;
                } else if (!(sacked & TCPCB_SACKED_ACKED)) {
-                       last_ackt = skb->skb_mstamp;
+                       last_ackt = tcp_skb_timestamp_us(skb);
                        WARN_ON_ONCE(last_ackt == 0);
                        if (!first_ackt)
                                first_ackt = last_ackt;
                        tp->delivered += acked_pcount;
                        if (!tcp_skb_spurious_retrans(tp, skb))
                                tcp_rack_advance(tp, sacked, scb->end_seq,
-                                                skb->skb_mstamp);
+                                                tcp_skb_timestamp_us(skb));
                }
                if (sacked & TCPCB_LOST)
                        tp->lost_out -= acked_pcount;
                        tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
                }
        } else if (skb && rtt_update && sack_rtt_us >= 0 &&
-                  sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
+                  sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
+                                                   tcp_skb_timestamp_us(skb))) {
                /* Do not re-arm RTO if the sack RTT is measured from data sent
                 * after when the head was last (re)transmitted. Otherwise the
                 * timeout may continue to extend in loss recovery.
 
                BUG_ON(!skb);
 
                tcp_mstamp_refresh(tp);
-               delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
+               delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
                remaining = icsk->icsk_rto -
                            usecs_to_jiffies(delta_us);
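Same arithmetic here, under assumed numbers: if the head skb departed 120 ms
before tp->tcp_mstamp and icsk_rto corresponds to 200 ms, then:

	/* delta_us  = 120000 us already elapsed since the head was sent
	 * remaining = 200 ms - 120 ms = 80 ms of RTO budget left, so the
	 * caller can re-arm the timer for the leftover rather than a
	 * full RTO.
	 */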
 
 
        head = tcp_rtx_queue_head(sk);
        if (!head)
                goto send_now;
-       age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
+       age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
        /* If next ACK is likely to come too late (half srtt), do not defer */
        if (age < (tp->srtt_us >> 4))
                goto send_now;
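The shift reads as a divide-by-16, but tp->srtt_us stores 8 times the smoothed
RTT, so srtt_us >> 4 really is half the srtt, matching the comment. With
assumed numbers:

	/* true srtt = 40000 us  =>  tp->srtt_us = 8 * 40000 = 320000
	 * 320000 >> 4 = 20000 us = srtt / 2
	 */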
 
          * bandwidth estimate.
          */
        if (!tp->packets_out) {
-               tp->first_tx_mstamp  = skb->skb_mstamp;
-               tp->delivered_mstamp = skb->skb_mstamp;
+               u64 tstamp_us = tcp_skb_timestamp_us(skb);
+
+               tp->first_tx_mstamp  = tstamp_us;
+               tp->delivered_mstamp = tstamp_us;
        }
 
        TCP_SKB_CB(skb)->tx.first_tx_mstamp     = tp->first_tx_mstamp;
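A note on the hunk above: when tp->packets_out is zero the pipe is empty, so
this transmission opens a fresh rate-sampling window. The local tstamp_us
reads the departure time once through the helper, and both the window start
(first_tx_mstamp) and the delivery clock (delivered_mstamp) are pinned to it.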
                rs->is_app_limited   = scb->tx.is_app_limited;
                rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
 
+               /* Record send time of most recently ACKed packet: */
+               tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
                /* Find the duration of the "send phase" of this window: */
-               rs->interval_us      = tcp_stamp_us_delta(
-                                               skb->skb_mstamp,
-                                               scb->tx.first_tx_mstamp);
+               rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
+                                                    scb->tx.first_tx_mstamp);
 
-               /* Record send time of most recently ACKed packet: */
-               tp->first_tx_mstamp  = skb->skb_mstamp;
        }
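The reordering keeps the semantics of the rate sample: interval_us still spans
from the first transmit of the window to the send time of the most recently
ACKed packet; that send time is simply read once through the helper into
tp->first_tx_mstamp and reused. An equivalent form, for illustration only:

	u64 tx_us = tcp_skb_timestamp_us(skb);

	tp->first_tx_mstamp = tx_us;
	rs->interval_us     = tcp_stamp_us_delta(tx_us,
						 scb->tx.first_tx_mstamp);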
        /* Mark off the skb delivered once it's sacked to avoid being
         * used again when it's cumulatively acked. For acked packets
 
 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
 {
        return tp->rack.rtt_us + reo_wnd -
-              tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
+              tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
 }
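tcp_rack_skb_timeout() answers "how much longer must this skb wait before RACK
may mark it lost?"; zero or negative means it is already past the reordering
window. Assumed numbers for illustration:

	/* rack.rtt_us = 40000, reo_wnd = 5000, skb sent 50000 us ago:
	 * 40000 + 5000 - 50000 = -5000  =>  mark the skb lost now.
	 * A positive result is used to re-arm the RACK reorder timer.
	 */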
 
 /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
                    !(scb->sacked & TCPCB_SACKED_RETRANS))
                        continue;
 
-               if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
+               if (!tcp_rack_sent_after(tp->rack.mstamp,
+                                        tcp_skb_timestamp_us(skb),
                                         tp->rack.end_seq, scb->end_seq))
                        break;