        return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
 
-static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
-                             struct sk_buff *skb)
+static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
        skb->csum    = 0;
        }
 }
 
-static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
-                           int mss_now, int nonagle)
+static inline void tcp_push(struct sock *sk, int flags, int mss_now,
+                           int nonagle)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tcp_send_head(sk)) {
                struct sk_buff *skb = tcp_write_queue_tail(sk);
                if (!(flags & MSG_MORE) || forced_push(tp))
                        tcp_mark_push(tp, skb);
                tcp_mark_urg(tp, flags, skb);
-               __tcp_push_pending_frames(sk, tp, mss_now,
+               __tcp_push_pending_frames(sk, mss_now,
                                          (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
        }
 }
                        if (!skb)
                                goto wait_for_memory;
 
-                       skb_entail(sk, tp, skb);
+                       skb_entail(sk, skb);
                        copy = size_goal;
                }
 
 
                if (forced_push(tp)) {
                        tcp_mark_push(tp, skb);
-                       __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
+                       __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
                } else if (skb == tcp_send_head(sk))
                        tcp_push_one(sk, mss_now);
                continue;
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                if (copied)
-                       tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+                       tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
                if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                        goto do_error;
 
 out:
        if (copied)
-               tcp_push(sk, tp, flags, mss_now, tp->nonagle);
+               tcp_push(sk, flags, mss_now, tp->nonagle);
        return copied;
 
 do_error:
 #define TCP_PAGE(sk)   (sk->sk_sndmsg_page)
 #define TCP_OFF(sk)    (sk->sk_sndmsg_off)
 
-static inline int select_size(struct sock *sk, struct tcp_sock *tp)
+static inline int select_size(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;
 
        if (sk->sk_route_caps & NETIF_F_SG) {
                                if (!sk_stream_memory_free(sk))
                                        goto wait_for_sndbuf;
 
-                               skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
+                               skb = sk_stream_alloc_pskb(sk, select_size(sk),
                                                           0, sk->sk_allocation);
                                if (!skb)
                                        goto wait_for_memory;
                                if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
                                        skb->ip_summed = CHECKSUM_PARTIAL;
 
-                               skb_entail(sk, tp, skb);
+                               skb_entail(sk, skb);
                                copy = size_goal;
                        }
 
 
                        if (forced_push(tp)) {
                                tcp_mark_push(tp, skb);
-                               __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
+                               __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
                        } else if (skb == tcp_send_head(sk))
                                tcp_push_one(sk, mss_now);
                        continue;
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                        if (copied)
-                               tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+                               tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
                        if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                                goto do_error;
 
 out:
        if (copied)
-               tcp_push(sk, tp, flags, mss_now, tp->nonagle);
+               tcp_push(sk, flags, mss_now, tp->nonagle);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;
 skip_copy:
                if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
                        tp->urg_data = 0;
-                       tcp_fast_path_check(sk, tp);
+                       tcp_fast_path_check(sk);
                }
                if (used + offset < skb->len)
                        continue;
                         * for currently queued segments.
                         */
                        tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
-                       tcp_push_pending_frames(sk, tp);
+                       tcp_push_pending_frames(sk);
                } else {
                        tp->nonagle &= ~TCP_NAGLE_OFF;
                }
                        tp->nonagle &= ~TCP_NAGLE_CORK;
                        if (tp->nonagle&TCP_NAGLE_OFF)
                                tp->nonagle |= TCP_NAGLE_PUSH;
-                       tcp_push_pending_frames(sk, tp);
+                       tcp_push_pending_frames(sk);
                }
                break;
 
 
  */
 
 /* Slow part of check#2. */
-static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
-                            const struct sk_buff *skb)
+static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        /* Optimize this! */
        int truesize = tcp_win_from_space(skb->truesize)/2;
        int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
        return 0;
 }
 
-static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
-                            struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
                if (tcp_win_from_space(skb->truesize) <= skb->len)
                        incr = 2*tp->advmss;
                else
-                       incr = __tcp_grow_window(sk, tp, skb);
+                       incr = __tcp_grow_window(sk, skb);
 
                if (incr) {
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
 }
 
 /* 5. Recalculate window clamp after socket hit its memory bounds. */
-static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
+static void tcp_clamp_window(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
 
        icsk->icsk_ack.quick = 0;
  * each ACK we send, he increments snd_cwnd and transmits more of his
  * queue.  -DaveM
  */
-static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now;
 
        TCP_ECN_check_ce(tp, skb);
 
        if (skb->len >= 128)
-               tcp_grow_window(sk, tp, skb);
+               tcp_grow_window(sk, skb);
 }
 
 /* Called to compute a smoothed rtt estimate. The data fed to this
        return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 }
 
-static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
+static inline int tcp_head_timedout(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        return tp->packets_out &&
               tcp_skb_timedout(sk, tcp_write_queue_head(sk));
 }
  * Main question: may we further continue forward transmission
  * with the same cwnd?
  */
-static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
+static int tcp_time_to_recover(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out;
 
        /* Do not perform any recovery during FRTO algorithm */
        /* Trick#3 : when we use RFC2988 timer restart, fast
         * retransmit can be triggered by timeout of queue head.
         */
-       if (tcp_head_timedout(sk, tp))
+       if (tcp_head_timedout(sk))
                return 1;
 
        /* Trick#4: It is still not OK... But will it be useful to delay
        packets_out = tp->packets_out;
        if (packets_out <= tp->reordering &&
            tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
-           !tcp_may_send_now(sk, tp)) {
+           !tcp_may_send_now(sk)) {
                /* We have nothing to send. This connection is limited
                 * either by receiver window or by application.
                 */
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (acked > 0) {
                /* One ACK acked hole. The rest eat duplicate ACKs. */
                if (acked-1 >= tp->sacked_out)
 }
 
 /* Mark head of queue up as lost. */
-static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
-                               int packets, u32 high_seq)
+static void tcp_mark_head_lost(struct sock *sk, int packets, u32 high_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        int cnt;
 
 
 /* Account newly detected lost packet(s) */
 
-static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
+static void tcp_update_scoreboard(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (IsFack(tp)) {
                int lost = tp->fackets_out - tp->reordering;
                if (lost <= 0)
                        lost = 1;
-               tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
+               tcp_mark_head_lost(sk, lost, tp->high_seq);
        } else {
-               tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
+               tcp_mark_head_lost(sk, 1, tp->high_seq);
        }
 
        /* New heuristics: it is possible only after we switched
         * Hence, we can detect timed out packets during fast
         * retransmit without falling to slow start.
         */
-       if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
+       if (!IsReno(tp) && tcp_head_timedout(sk)) {
                struct sk_buff *skb;
 
                skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
 /* Undo procedures. */
 
 #if FASTRETRANS_DEBUG > 1
-static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
+static void DBGUNDO(struct sock *sk, const char *msg)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
+
        printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
               msg,
               NIPQUAD(inet->daddr), ntohs(inet->dport),
 }
 
 /* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_recovery(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tcp_may_undo(tp)) {
                /* Happy end! We did not retransmit anything
                 * or our original transmission succeeded.
                 */
-               DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
+               DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
                tcp_undo_cwr(sk, 1);
                if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
                        NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
-static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
+static void tcp_try_undo_dsack(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tp->undo_marker && !tp->undo_retrans) {
-               DBGUNDO(sk, tp, "D-SACK");
+               DBGUNDO(sk, "D-SACK");
                tcp_undo_cwr(sk, 1);
                tp->undo_marker = 0;
                NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
 
 /* Undo during fast recovery after partial ACK. */
 
-static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
-                               int acked)
+static int tcp_try_undo_partial(struct sock *sk, int acked)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        /* Partial ACK arrived. Force Hoe's retransmit. */
        int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
 
 
                tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
-               DBGUNDO(sk, tp, "Hoe");
+               DBGUNDO(sk, "Hoe");
                tcp_undo_cwr(sk, 0);
                NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
 
 }
 
 /* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_loss(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tcp_may_undo(tp)) {
                struct sk_buff *skb;
                tcp_for_write_queue(skb, sk) {
 
                clear_all_retrans_hints(tp);
 
-               DBGUNDO(sk, tp, "partial loss");
+               DBGUNDO(sk, "partial loss");
                tp->lost_out = 0;
                tp->left_out = tp->sacked_out;
                tcp_undo_cwr(sk, 1);
        tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
-static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
+static void tcp_try_to_open(struct sock *sk, int flag)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        tp->left_out = tp->sacked_out;
 
        if (tp->retrans_out == 0)
            before(tp->snd_una, tp->high_seq) &&
            icsk->icsk_ca_state != TCP_CA_Open &&
            tp->fackets_out > tp->reordering) {
-               tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
+               tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, tp->high_seq);
                NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
        }
 
                switch (icsk->icsk_ca_state) {
                case TCP_CA_Loss:
                        icsk->icsk_retransmits = 0;
-                       if (tcp_try_undo_recovery(sk, tp))
+                       if (tcp_try_undo_recovery(sk))
                                return;
                        break;
 
                        break;
 
                case TCP_CA_Disorder:
-                       tcp_try_undo_dsack(sk, tp);
+                       tcp_try_undo_dsack(sk);
                        if (!tp->undo_marker ||
                            /* For SACK case do not Open to allow to undo
                             * catching for all duplicate ACKs. */
                case TCP_CA_Recovery:
                        if (IsReno(tp))
                                tcp_reset_reno_sack(tp);
-                       if (tcp_try_undo_recovery(sk, tp))
+                       if (tcp_try_undo_recovery(sk))
                                return;
                        tcp_complete_cwr(sk);
                        break;
                } else {
                        int acked = prior_packets - tp->packets_out;
                        if (IsReno(tp))
-                               tcp_remove_reno_sacks(sk, tp, acked);
-                       is_dupack = tcp_try_undo_partial(sk, tp, acked);
+                               tcp_remove_reno_sacks(sk, acked);
+                       is_dupack = tcp_try_undo_partial(sk, acked);
                }
                break;
        case TCP_CA_Loss:
                if (flag&FLAG_DATA_ACKED)
                        icsk->icsk_retransmits = 0;
-               if (!tcp_try_undo_loss(sk, tp)) {
+               if (!tcp_try_undo_loss(sk)) {
                        tcp_moderate_cwnd(tp);
                        tcp_xmit_retransmit_queue(sk);
                        return;
                }
 
                if (icsk->icsk_ca_state == TCP_CA_Disorder)
-                       tcp_try_undo_dsack(sk, tp);
+                       tcp_try_undo_dsack(sk);
 
-               if (!tcp_time_to_recover(sk, tp)) {
-                       tcp_try_to_open(sk, tp, flag);
+               if (!tcp_time_to_recover(sk)) {
+                       tcp_try_to_open(sk, flag);
                        return;
                }
 
                tcp_set_ca_state(sk, TCP_CA_Recovery);
        }
 
-       if (is_dupack || tcp_head_timedout(sk, tp))
-               tcp_update_scoreboard(sk, tp);
+       if (is_dupack || tcp_head_timedout(sk))
+               tcp_update_scoreboard(sk);
        tcp_cwnd_down(sk);
        tcp_xmit_retransmit_queue(sk);
 }
  * RFC2988 recommends to restart timer to now+rto.
  */
 
-static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
+static void tcp_ack_packets_out(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (!tp->packets_out) {
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
        } else {
 
        if (acked&FLAG_ACKED) {
                tcp_ack_update_rtt(sk, acked, seq_rtt);
-               tcp_ack_packets_out(sk, tp);
+               tcp_ack_packets_out(sk);
                if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED))
                        (*rtt_sample)(sk, tcp_usrtt(&tv));
 
  * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
  * and in FreeBSD. NetBSD's one is even worse.) is wrong.
  */
-static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
-                                struct sk_buff *skb, u32 ack, u32 ack_seq)
+static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+                                u32 ack_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        int flag = 0;
        u32 nwin = ntohs(tcp_hdr(skb)->window);
 
                         * fast path is recovered for sending TCP.
                         */
                        tp->pred_flags = 0;
-                       tcp_fast_path_check(sk, tp);
+                       tcp_fast_path_check(sk);
 
                        if (nwin > tp->max_window) {
                                tp->max_window = nwin;
                else
                        NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
 
-               flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
+               flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
                if (TCP_SKB_CB(skb)->sacked)
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
                }
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (skb->len)
-                       tcp_event_data_recv(sk, tp, skb);
+                       tcp_event_data_recv(sk, skb);
                if (th->fin)
                        tcp_fin(skb, sk, th);
 
                if (tp->rx_opt.num_sacks)
                        tcp_sack_remove(tp);
 
-               tcp_fast_path_check(sk, tp);
+               tcp_fast_path_check(sk);
 
                if (eaten > 0)
                        __kfree_skb(skb);
        NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
-               tcp_clamp_window(sk, tp);
+               tcp_clamp_window(sk);
        else if (tcp_memory_pressure)
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+static int tcp_should_expand_sndbuf(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        /* If the user specified a specific send buffer setting, do
         * not modify it.
         */
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tcp_should_expand_sndbuf(sk, tp)) {
+       if (tcp_should_expand_sndbuf(sk)) {
                int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
                        MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
                    demanded = max_t(unsigned int, tp->snd_cwnd,
        }
 }
 
-static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_data_snd_check(struct sock *sk)
 {
-       tcp_push_pending_frames(sk, tp);
+       tcp_push_pending_frames(sk);
        tcp_check_space(sk);
 }
 
                                 */
                                tcp_ack(sk, skb, 0);
                                __kfree_skb(skb);
-                               tcp_data_snd_check(sk, tp);
+                               tcp_data_snd_check(sk);
                                return 0;
                        } else { /* Header too small */
                                TCP_INC_STATS_BH(TCP_MIB_INERRS);
                                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                        }
 
-                       tcp_event_data_recv(sk, tp, skb);
+                       tcp_event_data_recv(sk, skb);
 
                        if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
                                /* Well, only one small jumplet in fast path... */
                                tcp_ack(sk, skb, FLAG_DATA);
-                               tcp_data_snd_check(sk, tp);
+                               tcp_data_snd_check(sk);
                                if (!inet_csk_ack_scheduled(sk))
                                        goto no_ack;
                        }
        /* step 7: process the segment text */
        tcp_data_queue(sk, skb);
 
-       tcp_data_snd_check(sk, tp);
+       tcp_data_snd_check(sk);
        tcp_ack_snd_check(sk);
        return 0;
 
                /* Do step6 onward by hand. */
                tcp_urg(sk, skb, th);
                __kfree_skb(skb);
-               tcp_data_snd_check(sk, tp);
+               tcp_data_snd_check(sk);
                return 0;
        }
 
 
        /* tcp_data could move socket to TIME-WAIT */
        if (sk->sk_state != TCP_CLOSE) {
-               tcp_data_snd_check(sk, tp);
+               tcp_data_snd_check(sk);
                tcp_ack_snd_check(sk);
        }
 
 
 /* By default, RFC2861 behavior.  */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
-static void update_send_head(struct sock *sk, struct tcp_sock *tp,
-                            struct sk_buff *skb)
+static void update_send_head(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        tcp_advance_send_head(sk, skb);
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
-       tcp_packets_out_inc(sk, tp, skb);
+       tcp_packets_out_inc(sk, skb);
 }
 
 /* SND.NXT, if window was not shrunk.
  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  * invalid. OK, let's make this for now:
  */
-static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
+static inline __u32 tcp_acceptable_seq(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
                return tp->snd_nxt;
        else
                                             md5 ? &md5_hash_location :
 #endif
                                             NULL);
-               TCP_ECN_send(sk, tp, skb, tcp_header_size);
+               TCP_ECN_send(sk, skb, tcp_header_size);
        }
 
 #ifdef CONFIG_TCP_MD5SIG
 
 /* Congestion window validation. (RFC2861) */
 
-static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
+static void tcp_cwnd_validate(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out = tp->packets_out;
 
        if (packets_out >= tp->snd_cwnd) {
        return cwnd_quota;
 }
 
-int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
+int tcp_may_send_now(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb = tcp_send_head(sk);
 
        return (skb &&
  *
  * This algorithm is from John Heffner.
  */
-static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 send_win, cong_win, limit, in_flight;
 
                /* Decrement cwnd here because we are sending
                * effectively two packets. */
                tp->snd_cwnd--;
-               update_send_head(sk, tp, nskb);
+               update_send_head(sk, nskb);
 
                icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
                tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
                                                      nonagle : TCP_NAGLE_PUSH))))
                                break;
                } else {
-                       if (tcp_tso_should_defer(sk, tp, skb))
+                       if (tcp_tso_should_defer(sk, skb))
                                break;
                }
 
                /* Advance the send_head.  This one is sent out.
                 * This call will increment packets_out.
                 */
-               update_send_head(sk, tp, skb);
+               update_send_head(sk, skb);
 
                tcp_minshall_update(tp, mss_now, skb);
                sent_pkts++;
        }
 
        if (likely(sent_pkts)) {
-               tcp_cwnd_validate(sk, tp);
+               tcp_cwnd_validate(sk);
                return 0;
        }
        return !tp->packets_out && tcp_send_head(sk);
  * TCP_CORK or attempt at coalescing tiny packets.
  * The socket must be locked by the caller.
  */
-void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-                              unsigned int cur_mss, int nonagle)
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+                              int nonagle)
 {
        struct sk_buff *skb = tcp_send_head(sk);
 
        if (skb) {
                if (tcp_write_xmit(sk, cur_mss, nonagle))
-                       tcp_check_probe_timer(sk, tp);
+                       tcp_check_probe_timer(sk);
        }
 }
 
                TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
                if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
-                       update_send_head(sk, tp, skb);
-                       tcp_cwnd_validate(sk, tp);
+                       update_send_head(sk, skb);
+                       tcp_cwnd_validate(sk);
                        return;
                }
        }
         * segments to send.
         */
 
-       if (tcp_may_send_now(sk, tp))
+       if (tcp_may_send_now(sk))
                return;
 
        if (tp->forward_skb_hint) {
                TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
                tcp_queue_skb(sk, skb);
        }
-       __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
+       __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
 }
 
 /* We get here when a process closes a file descriptor (either due to
  */
 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
 
        /* NOTE: No TCP options attached and we never retransmit this. */
        skb_shinfo(skb)->gso_type = 0;
 
        /* Send it off. */
-       TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
+       TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        if (tcp_transmit_skb(sk, skb, 0, priority))
        skb_reserve(buff, MAX_TCP_HEADER);
 
        TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
-       TCP_ECN_send_syn(sk, tp, buff);
+       TCP_ECN_send_syn(sk, buff);
        TCP_SKB_CB(buff)->sacked = 0;
        skb_shinfo(buff)->gso_segs = 1;
        skb_shinfo(buff)->gso_size = 0;
 {
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != TCP_CLOSE) {
-               struct tcp_sock *tp = tcp_sk(sk);
                struct sk_buff *buff;
 
                /* We are not putting this on the write queue, so
                skb_shinfo(buff)->gso_type = 0;
 
                /* Send it off, this clears delayed acks for us. */
-               TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
+               TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
                TCP_SKB_CB(buff)->when = tcp_time_stamp;
                tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
        }
                        TCP_SKB_CB(skb)->when = tcp_time_stamp;
                        err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
                        if (!err) {
-                               update_send_head(sk, tp, skb);
+                               update_send_head(sk, skb);
                        }
                        return err;
                } else {