struct sk_buff          *prev;
 
                        union {
-                               ktime_t         tstamp;
-                               u64             skb_mstamp;
+                               struct net_device       *dev;
+                               /* Some protocols might use this space to store information
+                                * while the device pointer is NULL.
+                                * The UDP receive path is one user.
+                                */
+                               unsigned long           dev_scratch;
                        };
                };
                struct rb_node  rbnode; /* used in netem & tcp stack */
        struct sock             *sk;
 
        union {
-               struct net_device       *dev;
-               /* Some protocols might use this space to store information,
-                * while device pointer would be NULL.
-                * UDP receive path is one user.
-                */
-               unsigned long           dev_scratch;
+               ktime_t         tstamp;
+               u64             skb_mstamp;
        };
        /*
         * This is the control buffer. It is free to use for every
 
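Everything in this patch follows from which fields share bytes with skb->rbnode: tstamp/skb_mstamp move out of the overlay and become always valid, while dev/dev_scratch move in and are only meaningful while the skb is not sitting in an rbtree. A minimal userspace sketch of that aliasing, with toy stand-ins for ktime_t and rb_node (illustration only, not the real struct sk_buff), shows why the old layout forced TCP to stash timestamps around rbtree use, and why the new one instead obliges netem to restore skb->dev on dequeue:

#include <stdio.h>

/* Toy stand-in for struct rb_node; illustration only. */
struct toy_rb_node { void *left, *right; };

struct toy_skb {
	union {					/* models the rbnode overlay */
		unsigned long long tstamp;	/* OLD layout kept tstamp here */
		struct toy_rb_node rbnode;
	};
};

int main(void)
{
	struct toy_skb skb = { .tstamp = 123456789ULL };

	/* Inserting into a tree writes the node links... */
	skb.rbnode.left  = &skb;
	skb.rbnode.right = NULL;

	/* ...and the overlaid field is gone: same bytes, new meaning. */
	printf("tstamp after rbtree insert: %llu\n", skb.tstamp);
	return 0;
}

With tstamp out of the overlay, every "stash and restore the timestamp" workaround below can simply be deleted.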
        tp->rx_opt.num_sacks = num_sacks;
 }
 
-enum tcp_queue {
-       OOO_QUEUE,
-       RCV_QUEUE,
-};
-
 /**
  * tcp_try_coalesce - try to merge skb to prior one
  * @sk: socket
  * Returns true if caller should free @from instead of queueing it
  */
 static bool tcp_try_coalesce(struct sock *sk,
-                            enum tcp_queue dest,
                             struct sk_buff *to,
                             struct sk_buff *from,
                             bool *fragstolen)
 
        if (TCP_SKB_CB(from)->has_rxtstamp) {
                TCP_SKB_CB(to)->has_rxtstamp = true;
-               if (dest == OOO_QUEUE)
-                       TCP_SKB_CB(to)->swtstamp = TCP_SKB_CB(from)->swtstamp;
-               else
-                       to->tstamp = from->tstamp;
+               to->tstamp = from->tstamp;
        }
 
        return true;
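The kernel-doc above states the ownership contract that all the call sites below rely on: when coalescing succeeds, the payload of @from now lives in @to, so the caller must free @from instead of queueing it. A hedged userspace sketch of that contract with a toy packet type (the real code's fragstolen/kfree_skb_partial() page-stealing path is not modeled):

#include <stdbool.h>
#include <string.h>

struct toy_pkt {
	size_t len;
	unsigned char data[2048];
};

/* Returns true if the caller should free `from` instead of queueing
 * it, mirroring the tcp_try_coalesce() contract (illustrative only).
 */
static bool try_coalesce(struct toy_pkt *to, const struct toy_pkt *from)
{
	if (to->len + from->len > sizeof(to->data))
		return false;		/* no room: caller queues `from` */
	memcpy(to->data + to->len, from->data, from->len);
	to->len += from->len;
	return true;			/* merged: caller frees `from` */
}

int main(void)
{
	struct toy_pkt tail = { 0 }, pkt = { .len = 4, .data = "data" };

	if (try_coalesce(&tail, &pkt))
		;	/* caller would free `pkt` here */
	return 0;
}

Note the signature change itself: with skb->tstamp now valid on both queues, the dest parameter and the OOO_QUEUE/RCV_QUEUE distinction have nothing left to select between.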
                }
                p = rb_next(p);
                rb_erase(&skb->rbnode, &tp->out_of_order_queue);
-               /* Replace tstamp which was stomped by rbnode */
-               if (TCP_SKB_CB(skb)->has_rxtstamp)
-                       skb->tstamp = TCP_SKB_CB(skb)->swtstamp;
 
                if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
                        SOCK_DEBUG(sk, "ofo packet was already received\n");
                           TCP_SKB_CB(skb)->end_seq);
 
                tail = skb_peek_tail(&sk->sk_receive_queue);
-               eaten = tail && tcp_try_coalesce(sk, RCV_QUEUE,
-                                                tail, skb, &fragstolen);
+               eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
                tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
                fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
                if (!eaten)
                return;
        }
 
-       /* Stash tstamp to avoid being stomped on by rbnode */
-       if (TCP_SKB_CB(skb)->has_rxtstamp)
-               TCP_SKB_CB(skb)->swtstamp = skb->tstamp;
-
        /* Disable header prediction. */
        tp->pred_flags = 0;
        inet_csk_schedule_ack(sk);
        /* In the typical case, we are adding an skb to the end of the list.
         * Use of ooo_last_skb avoids the O(log N) rbtree lookup.
         */
-       if (tcp_try_coalesce(sk, OOO_QUEUE, tp->ooo_last_skb,
+       if (tcp_try_coalesce(sk, tp->ooo_last_skb,
                             skb, &fragstolen)) {
 coalesce_done:
                tcp_grow_window(sk, skb);
                                __kfree_skb(skb1);
                                goto merge_right;
                        }
-               } else if (tcp_try_coalesce(sk, OOO_QUEUE, skb1,
+               } else if (tcp_try_coalesce(sk, skb1,
                                            skb, &fragstolen)) {
                        goto coalesce_done;
                }
 
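One detail in the hunk above deserves a sketch: tcp_data_queue_ofo() first tries to coalesce with tp->ooo_last_skb because out-of-order segments usually arrive in ascending sequence order, so caching the right-most node turns the common case into O(1) work instead of an O(log N) descent. A toy version of that fast path, using a plain unbalanced BST as a stand-in for the kernel rbtree (hypothetical names):

#include <stdlib.h>

struct node {
	unsigned int seq;
	struct node *left, *right;
};

static struct node *root;
static struct node *last;	/* mirrors tp->ooo_last_skb */

static struct node *new_node(unsigned int seq)
{
	struct node *n = calloc(1, sizeof(*n));

	n->seq = seq;
	return n;
}

static void ooo_insert(unsigned int seq)
{
	struct node **p = &root;

	/* Typical case: the segment lands at (or past) the cached tail,
	 * so append in O(1) without touching the rest of the tree.
	 */
	if (last && seq >= last->seq) {
		last = last->right = new_node(seq);
		return;
	}
	/* Otherwise descend; O(log N) in the real (balanced) rbtree. */
	while (*p)
		p = (seq < (*p)->seq) ? &(*p)->left : &(*p)->right;
	*p = new_node(seq);
	if (!last)
		last = root;
}

int main(void)
{
	ooo_insert(10);		/* slow path: tree was empty */
	ooo_insert(20);		/* fast path */
	ooo_insert(15);		/* slow path: lands before the tail */
	ooo_insert(30);		/* fast path */
	return 0;
}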
        __skb_pull(skb, hdrlen);
        eaten = (tail &&
-                tcp_try_coalesce(sk, RCV_QUEUE, tail,
+                tcp_try_coalesce(sk, tail,
                                  skb, fragstolen)) ? 1 : 0;
        tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
        if (!eaten) {
 
  */
 struct netem_skb_cb {
        psched_time_t   time_to_send;
-       ktime_t         tstamp_save;
 };
 
 
                }
 
                cb->time_to_send = now + delay;
-               cb->tstamp_save = skb->tstamp;
                ++q->counter;
                tfifo_enqueue(skb, sch);
        } else {
                        qdisc_qstats_backlog_dec(sch, skb);
                        skb->next = NULL;
                        skb->prev = NULL;
-                       skb->tstamp = netem_skb_cb(skb)->tstamp_save;
+                       /* skb->dev shares skb->rbnode area,
+                        * so we need to restore its value.
+                        */
+                       skb->dev = qdisc_dev(sch);
 
 #ifdef CONFIG_NET_CLS_ACT
                        /*
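The flip side of the reorg shows up right here: while an skb waits in netem's tfifo rbtree, the bytes that normally hold skb->dev are tree links, so tstamp_save becomes dead weight but the device pointer must be rebuilt before the packet moves on. Conveniently the owner of the queue already knows the answer, which is what qdisc_dev(sch) supplies above. A toy sketch of that restore-from-the-owner pattern (names are illustrative):

#include <assert.h>

struct toy_dev { int ifindex; };

struct toy_skb {
	union {			/* NEW layout: dev overlays the tree node */
		struct toy_dev *dev;
		struct { void *left, *right; } rbnode;
	};
};

/* The queue owner knows the device, so the pointer is recomputed at
 * dequeue time instead of being saved per packet.
 */
static void toy_dequeue(struct toy_skb *skb, struct toy_dev *owner_dev)
{
	/* The union still holds stale tree links at this point; writing
	 * dev reclaims the bytes with a valid pointer.
	 */
	skb->dev = owner_dev;
}

int main(void)
{
	struct toy_dev eth0 = { .ifindex = 2 };
	struct toy_skb skb = { .dev = &eth0 };

	skb.rbnode.left = &skb;		/* enqueue: tree insert clobbers dev */
	toy_dequeue(&skb, &eth0);	/* dequeue: restore from the qdisc */
	assert(skb.dev == &eth0);
	return 0;
}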