DCCPF_SEQ_WMAX));
 }
 
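+/*
+ * Schedule the xmit tasklet while holding a reference on the socket,
+ * so that sk cannot be freed before the tasklet runs; the matching
+ * sock_put() is done by dccp_write_xmitlet().
+ */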
+static void dccp_tasklet_schedule(struct sock *sk)
+{
+       struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
+
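+       /* Open-coded tasklet_schedule() so the reference is taken only
+        * when this call actually queues the tasklet, keeping
+        * sock_hold()/sock_put() balanced.
+        */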
+       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+               sock_hold(sk);
+               __tasklet_schedule(t);
+       }
+}
+
 static void ccid2_hc_tx_rto_expire(struct timer_list *t)
 {
        struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
        struct sock *sk = hc->sk;
        const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
 
        /* if we were blocked before, we may now send cwnd=1 packet */
        if (sender_was_blocked)
-               tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+               dccp_tasklet_schedule(sk);
        /* restart backed-off timer */
        sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
 out:
        bh_unlock_sock(sk);
        sock_put(sk);
 }
 
 done:
        /* check if incoming Acks allow pending packets to be sent */
        if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
-               tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+               dccp_tasklet_schedule(sk);
        dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
 static void dccp_write_xmitlet(unsigned long data)
 {
        struct sock *sk = (struct sock *)data;
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
        else
                dccp_write_xmit(sk);
        bh_unlock_sock(sk);
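+       /* Release the reference taken by dccp_tasklet_schedule(), or by
+        * sk_reset_timer() when invoked synchronously from the xmit timer.
+        */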
+       sock_put(sk);
 }
 
 static void dccp_write_xmit_timer(struct timer_list *t)
 {
        struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer);
        struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
 
        dccp_write_xmitlet((unsigned long)sk);
-       sock_put(sk);
 }
 
 void dccp_init_xmit_timers(struct sock *sk)