int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 
+/* Bind a congestion-control algorithm to a socket without running its
+ * ->init() hook; tcp_init_congestion_control() performs that step later.
+ */
+void tcp_assign_congestion_control(struct sock *sk);
 void tcp_init_congestion_control(struct sock *sk);
 void tcp_cleanup_congestion_control(struct sock *sk);
 int tcp_set_default_congestion_control(const char *name);
 int tcp_slow_start(struct tcp_sock *tp, u32 acked);
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
-extern struct tcp_congestion_ops tcp_init_congestion_ops;
 u32 tcp_reno_ssthresh(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
 
 
        tp->reordering = sysctl_tcp_reordering;
        tcp_enable_early_retrans(tp);
-       icsk->icsk_ca_ops = &tcp_init_congestion_ops;
+       /* Pick a real CA algorithm up front instead of the old placeholder
+        * ops; its ->init() is not called here.
+        */
+       tcp_assign_congestion_control(sk);
 
        tp->tsoffset = 0;
 
                tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
 
        tcp_metrics_init();
-
-       tcp_register_congestion_control(&tcp_reno);
-
+       /* Reno is the guaranteed last-resort fallback in
+        * tcp_assign_congestion_control(), so failing to register it
+        * would leave sockets without a CA — treat that as fatal.
+        */
+       BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
        tcp_tasklet_init();
 }
 
 EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
 
 /* Assign choice of congestion control. */
-void tcp_init_congestion_control(struct sock *sk)
+void tcp_assign_congestion_control(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_congestion_ops *ca;
 
-       /* if no choice made yet assign the current value set as default */
-       if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
-               rcu_read_lock();
-               list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
-                       if (try_module_get(ca->owner)) {
-                               icsk->icsk_ca_ops = ca;
-                               break;
-                       }
-
-                       /* fallback to next available */
+       rcu_read_lock();
+       list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
+               if (likely(try_module_get(ca->owner))) {
+                       icsk->icsk_ca_ops = ca;
+                       goto out;
                }
-               rcu_read_unlock();
+               /* Fallback to next available. The last really
+                * guaranteed fallback is Reno from this list.
+                */
        }
+out:
+       rcu_read_unlock();
+
+       /* Clear out private data before diag gets it and
+        * the ca has not been initialized.
+        */
+       /* NOTE(review): 'ca' is only a valid pointer here because the loop
+        * always exits via the goto — Reno's try_module_get() cannot fail.
+        * If the list could ever be exhausted, 'ca' would be the list-head
+        * cursor, not real ops.
+        */
+       if (ca->get_info)
+               memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+}
+
+/* Run the ->init() hook of the CA ops already attached to sk, if any. */
+void tcp_init_congestion_control(struct sock *sk)
+{
+       const struct inet_connection_sock *icsk = inet_csk(sk);
 
        if (icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
 };
-
-/* Initial congestion control used (until SYN)
- * really reno under another name so we can tell difference
- * during tcp_set_default_congestion_control
- */
-struct tcp_congestion_ops tcp_init_congestion_ops  = {
-       .name           = "",
-       .owner          = THIS_MODULE,
-       .ssthresh       = tcp_reno_ssthresh,
-       .cong_avoid     = tcp_reno_cong_avoid,
-};
-EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);
 
                newtp->snd_cwnd = TCP_INIT_CWND;
                newtp->snd_cwnd_cnt = 0;
 
-               if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
-                   !try_module_get(newicsk->icsk_ca_ops->owner))
-                       newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
+               /* Could not take a ref on the parent's CA module (it is
+                * presumably being unloaded) — pick a fresh algorithm for
+                * the child socket instead of inheriting it.
+                */
+               if (!try_module_get(newicsk->icsk_ca_ops->owner))
+                       tcp_assign_congestion_control(newsk);
 
                tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);