/* And these are ours. */
        unsigned int            tw_ipv6only     : 1,
                                tw_transparent  : 1,
-                               tw_pad          : 14,   /* 14 bits hole */
+                               tw_pad          : 6,    /* 6 bits hole */
+                               tw_tos          : 8,
                                tw_ipv6_offset  : 16;
        kmemcheck_bitfield_end(flags);
        unsigned long           tw_ttd;
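
A quick aside on the bitfield accounting above: the old 14-bit hole is split
into a 6-bit pad plus the new 8-bit tw_tos, so the flags word still sums to
1 + 1 + 6 + 8 + 16 = 32 bits and the structure does not grow. A standalone
sketch of checking that at compile time (the mock struct and the C11
static_assert are illustrative, not part of the patch, and assume the usual
ABI where a fully packed 32-bit bitfield occupies one unsigned int):

	#include <assert.h>	/* C11 static_assert */

	/* hypothetical stand-in for the tw_* bitfield layout above */
	struct tw_flags_mock {
		unsigned int	ipv6only	: 1,
				transparent	: 1,
				pad		: 6,	/* was 14 before tos */
				tos		: 8,
				ipv6_offset	: 16;
	};

	/* 1 + 1 + 6 + 8 + 16 = 32 bits: still a single unsigned int */
	static_assert(sizeof(struct tw_flags_mock) == sizeof(unsigned int),
		      "tw flags bitfield no longer fits in 32 bits");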
 
        int         csumoffset; /* u16 offset of csum in iov[0].iov_base */
                                /* -1 if not needed */ 
        int         bound_dev_if;
+       u8          tos;
 }; 
 
 #define IP_REPLY_ARG_NOSRCCHECK 1
 }
 
 void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
-                  struct ip_reply_arg *arg, unsigned int len);
+                  const struct ip_reply_arg *arg, unsigned int len);
 
 struct ipv4_config {
        int     log_martians;
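
On the include/net/ip.h side, struct ip_reply_arg gains a tos byte and the
ip_send_reply() prototype takes the argument as const, which works because the
function only reads from it. The tcp_ipv4.c hunks further down show how the
callers fill the new field before handing the argument over.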
 
                tw->tw_daddr        = inet->inet_daddr;
                tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
+               tw->tw_tos          = inet->tos;
                tw->tw_num          = inet->inet_num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
 
  *             structure to pass arguments.
  */
 void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
-                  struct ip_reply_arg *arg, unsigned int len)
+                  const struct ip_reply_arg *arg, unsigned int len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options_data replyopts;
        }
 
        flowi4_init_output(&fl4, arg->bound_dev_if, 0,
-                          RT_TOS(ip_hdr(skb)->tos),
+                          RT_TOS(arg->tos),
                           RT_SCOPE_UNIVERSE, sk->sk_protocol,
                           ip_reply_arg_flowi_flags(arg),
                           daddr, rt->rt_spec_dst,
           with locally disabled BH and that sk cannot be already spinlocked.
         */
        bh_lock_sock(sk);
-       inet->tos = ip_hdr(skb)->tos;
+       inet->tos = arg->tos;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
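
In net/ipv4/ip_output.c, ip_send_reply() now takes the TOS from the reply
argument instead of peeking at the incoming packet's IP header, and each
caller decides what to put there (the tcp_v4_send_reset() hunk right below
simply echoes ip_hdr(skb)->tos, the ACK paths pass an explicit value). Note
that the value is used twice: RT_TOS() masks it down to the RFC 1349 TOS bits
for the route lookup, while inet->tos keeps the full byte for the emitted
header. A small userspace illustration of that masking (RT_TOS is re-created
here with the kernel's definition; the program itself is illustrative only):

	#include <netinet/ip.h>	/* IPTOS_TOS_MASK, IPTOS_LOWDELAY */
	#include <stdio.h>

	/* mirrors the kernel's RT_TOS() from include/net/route.h */
	#define RT_TOS(tos)	((tos) & IPTOS_TOS_MASK)

	int main(void)
	{
		unsigned char tos = IPTOS_LOWDELAY | 0x01;	/* TOS bits plus an ECN bit */

		printf("route lookup tos: 0x%02x\n", RT_TOS(tos));	/* 0x10 */
		printf("ip header tos:    0x%02x\n", tos);		/* 0x11 */
		return 0;
	}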
 
        arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
 
        net = dev_net(skb_dst(skb)->dev);
+       arg.tos = ip_hdr(skb)->tos;
        ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
                      &arg, arg.iov[0].iov_len);
 
 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 ts, int oif,
                            struct tcp_md5sig_key *key,
-                           int reply_flags)
+                           int reply_flags, u8 tos)
 {
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        if (oif)
                arg.bound_dev_if = oif;
-
+       arg.tos = tos;
        ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
                      &arg, arg.iov[0].iov_len);
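
tcp_v4_send_ack() now takes the TOS to use as an explicit parameter and copies
it into the reply argument, so the choice is left to the caller: as the two
hunks below show, the TIME_WAIT path passes the value saved in tw->tw_tos when
the timewait socket was set up, while the request-socket path keeps echoing
the incoming packet's ip_hdr(skb)->tos.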
 
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
-                       tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
+                       tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
+                       tw->tw_tos
                        );
 
        inet_twsk_put(tw);
                        req->ts_recent,
                        0,
                        tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
-                       inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
+                       inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+                       ip_hdr(skb)->tos);
 }
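
The end result is that a TOS configured on a connected socket is remembered in
tw->tw_tos when the connection enters TIME_WAIT, so ACKs the kernel sends on
its behalf in that state carry the connection's own TOS rather than a value
derived from whatever packet happened to arrive. A rough sketch of setting
such a TOS from userspace (standard setsockopt(IP_TOS); the program is
illustrative and not part of the patch):

	#include <netinet/in.h>
	#include <netinet/ip.h>	/* IPTOS_LOWDELAY */
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int tos = IPTOS_LOWDELAY;
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			return 1;
		/* Stored in inet->tos; with the changes above it is also copied
		 * into tw->tw_tos at TIME_WAIT entry and reused for the ACKs
		 * sent from that state. */
		if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0)
			perror("setsockopt(IP_TOS)");
		close(fd);
		return 0;
	}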
 
 /*