        msk->token = subflow_req->token;
        msk->subflow = NULL;
 
-       if (unlikely(mptcp_token_new_accept(subflow_req->token, nsk))) {
-               nsk->sk_state = TCP_CLOSE;
-               bh_unlock_sock(nsk);
-
-               /* we can't call into mptcp_close() here - possible BH context
-                * free the sock directly.
-                * sk_clone_lock() sets nsk refcnt to two, hence call sk_free()
-                * too.
-                */
-               sk_common_release(nsk);
-               sk_free(nsk);
-               return NULL;
-       }
-
        msk->write_seq = subflow_req->idsn + 1;
        atomic64_set(&msk->snd_una, msk->write_seq);
        if (mp_opt->mp_capable) {
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
 
-       mptcp_token_destroy(msk->token);
+       mptcp_token_destroy(msk);
        if (msk->cached_ext)
                __skb_ext_put(msk->cached_ext);
 
        }
 }
 
+static int mptcp_hash(struct sock *sk)
+{
+       /* should never be called:
+        * we hash the TCP subflows, not the master socket
+        */
+       WARN_ON_ONCE(1);
+       return 0;
+}
+
+static void mptcp_unhash(struct sock *sk)
+{
+       /* called from sk_common_release(), but nothing to do here */
+}
+
 static int mptcp_get_port(struct sock *sk, unsigned short snum)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
         */
        WRITE_ONCE(msk->remote_key, subflow->remote_key);
        WRITE_ONCE(msk->local_key, subflow->local_key);
-       WRITE_ONCE(msk->token, subflow->token);
        WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
        WRITE_ONCE(msk->ack_seq, ack_seq);
        WRITE_ONCE(msk->can_ack, 1);
        .sendmsg        = mptcp_sendmsg,
        .recvmsg        = mptcp_recvmsg,
        .release_cb     = mptcp_release_cb,
-       .hash           = inet_hash,
-       .unhash         = inet_unhash,
+       .hash           = mptcp_hash,
+       .unhash         = mptcp_unhash,
        .get_port       = mptcp_get_port,
        .sockets_allocated      = &mptcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_wmem),
        .sysctl_mem     = sysctl_tcp_mem,
        .obj_size       = sizeof(struct mptcp_sock),
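+       /* token lookups walk the msk hash chains under RCU only; with
+        * SLAB_TYPESAFE_BY_RCU a socket can be freed and reused while a
+        * lookup still sees it, which mptcp_token_get_sock() handles by
+        * re-checking the token after acquiring a reference
+        */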
+       .slab_flags     = SLAB_TYPESAFE_BY_RCU,
        .no_autobind    = true,
 };
 
                                int addr_len, int flags)
 {
        struct mptcp_sock *msk = mptcp_sk(sock->sk);
+       struct mptcp_subflow_context *subflow;
        struct socket *ssock;
        int err;
 
                goto do_connect;
        }
 
+       mptcp_token_destroy(msk);
        ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
        if (IS_ERR(ssock)) {
                err = PTR_ERR(ssock);
                goto unlock;
        }
 
+       subflow = mptcp_subflow_ctx(ssock->sk);
 #ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
         * TCP option space.
         */
        if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
-               mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
+               subflow->request_mptcp = 0;
 #endif
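+       /* the token is now allocated at connect() time; if no usable token
+        * can be found, clear request_mptcp so the connection falls back to
+        * plain TCP
+        */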
+       if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
+               subflow->request_mptcp = 0;
 
 do_connect:
        err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
        pr_debug("msk=%p", msk);
 
        lock_sock(sock->sk);
+       mptcp_token_destroy(msk);
        ssock = __mptcp_socket_create(msk, TCP_LISTEN);
        if (IS_ERR(ssock)) {
                err = PTR_ERR(ssock);
 
        mptcp_subflow_init();
        mptcp_pm_init();
+       mptcp_token_init();
 
        if (proto_register(&mptcp_prot, 1) != 0)
                panic("Failed to register MPTCP proto.\n");
 
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/radix-tree.h>
+#include <linux/memblock.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <net/sock.h>
 #include <net/mptcp.h>
 #include "protocol.h"
 
-static RADIX_TREE(token_tree, GFP_ATOMIC);
-static RADIX_TREE(token_req_tree, GFP_ATOMIC);
-static DEFINE_SPINLOCK(token_tree_lock);
-static int token_used __read_mostly;
+#define TOKEN_MAX_RETRIES      4
+#define TOKEN_MAX_CHAIN_LEN    4
+
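+/* a hash bucket: both the request socket chain and the established msk
+ * chain for this slot are protected by the bucket lock; chain_len caps
+ * how many tokens may hash into this bucket
+ */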
+struct token_bucket {
+       spinlock_t              lock;
+       int                     chain_len;
+       struct hlist_nulls_head req_chain;
+       struct hlist_nulls_head msk_chain;
+};
+
+static struct token_bucket *token_hash __read_mostly;
+static unsigned int token_mask __read_mostly;
+
+static struct token_bucket *token_bucket(u32 token)
+{
+       return &token_hash[token & token_mask];
+}
+
+/* called with bucket lock held */
+static struct mptcp_subflow_request_sock *
+__token_lookup_req(struct token_bucket *t, u32 token)
+{
+       struct mptcp_subflow_request_sock *req;
+       struct hlist_nulls_node *pos;
+
+       hlist_nulls_for_each_entry_rcu(req, pos, &t->req_chain, token_node)
+               if (req->token == token)
+                       return req;
+       return NULL;
+}
+
+/* called with bucket lock held */
+static struct mptcp_sock *
+__token_lookup_msk(struct token_bucket *t, u32 token)
+{
+       struct hlist_nulls_node *pos;
+       struct sock *sk;
+
+       sk_nulls_for_each_rcu(sk, pos, &t->msk_chain)
+               if (mptcp_sk(sk)->token == token)
+                       return mptcp_sk(sk);
+       return NULL;
+}
+
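+/* a zero token is never accepted; the bucket is also considered busy when
+ * its chain is full or the token is already in use by a request or an
+ * established socket
+ */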
+static bool __token_bucket_busy(struct token_bucket *t, u32 token)
+{
+       return !token || t->chain_len >= TOKEN_MAX_CHAIN_LEN ||
+              __token_lookup_req(t, token) || __token_lookup_msk(t, token);
+}
 
 /**
  * mptcp_token_new_request - create new key/idsn/token for subflow_request
 int mptcp_token_new_request(struct request_sock *req)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
-       int err;
-
-       while (1) {
-               u32 token;
-
-               mptcp_crypto_key_gen_sha(&subflow_req->local_key,
-                                        &subflow_req->token,
-                                        &subflow_req->idsn);
-               pr_debug("req=%p local_key=%llu, token=%u, idsn=%llu\n",
-                        req, subflow_req->local_key, subflow_req->token,
-                        subflow_req->idsn);
-
-               token = subflow_req->token;
-               spin_lock_bh(&token_tree_lock);
-               if (!radix_tree_lookup(&token_req_tree, token) &&
-                   !radix_tree_lookup(&token_tree, token))
-                       break;
-               spin_unlock_bh(&token_tree_lock);
+       int retries = TOKEN_MAX_RETRIES;
+       struct token_bucket *bucket;
+       u32 token;
+
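+       /* the freshly generated token may collide or land in a full bucket:
+        * try again with a new key a bounded number of times
+        */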
+again:
+       mptcp_crypto_key_gen_sha(&subflow_req->local_key,
+                                &subflow_req->token,
+                                &subflow_req->idsn);
+       pr_debug("req=%p local_key=%llu, token=%u, idsn=%llu\n",
+                req, subflow_req->local_key, subflow_req->token,
+                subflow_req->idsn);
+
+       token = subflow_req->token;
+       bucket = token_bucket(token);
+       spin_lock_bh(&bucket->lock);
+       if (__token_bucket_busy(bucket, token)) {
+               spin_unlock_bh(&bucket->lock);
+               if (!--retries)
+                       return -EBUSY;
+               goto again;
        }
 
-       err = radix_tree_insert(&token_req_tree,
-                               subflow_req->token, &token_used);
-       spin_unlock_bh(&token_tree_lock);
-       return err;
+       hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain);
+       bucket->chain_len++;
+       spin_unlock_bh(&bucket->lock);
+       return 0;
 }
 
 /**
 int mptcp_token_new_connect(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-       struct sock *mptcp_sock = subflow->conn;
-       int err;
-
-       while (1) {
-               u32 token;
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+       int retries = TOKEN_MAX_RETRIES;
+       struct token_bucket *bucket;
 
-               mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token,
-                                        &subflow->idsn);
+       pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
+                sk, subflow->local_key, subflow->token, subflow->idsn);
 
-               pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
-                        sk, subflow->local_key, subflow->token, subflow->idsn);
+again:
+       mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token,
+                                &subflow->idsn);
 
-               token = subflow->token;
-               spin_lock_bh(&token_tree_lock);
-               if (!radix_tree_lookup(&token_req_tree, token) &&
-                   !radix_tree_lookup(&token_tree, token))
-                       break;
-               spin_unlock_bh(&token_tree_lock);
+       bucket = token_bucket(subflow->token);
+       spin_lock_bh(&bucket->lock);
+       if (__token_bucket_busy(bucket, subflow->token)) {
+               spin_unlock_bh(&bucket->lock);
+               if (!--retries)
+                       return -EBUSY;
+               goto again;
        }
-       err = radix_tree_insert(&token_tree, subflow->token, mptcp_sock);
-       spin_unlock_bh(&token_tree_lock);
 
-       return err;
+       WRITE_ONCE(msk->token, subflow->token);
+       __sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain);
+       bucket->chain_len++;
+       spin_unlock_bh(&bucket->lock);
+       return 0;
 }
 
 /**
- * mptcp_token_new_accept - insert token for later processing
- * @token: the token to insert to the tree
- * @conn: the just cloned socket linked to the new connection
+ * mptcp_token_accept - replace a req sk with full sock in token hash
+ * @req: the request socket to be removed
+ * @msk: the just cloned socket linked to the new connection
  *
  * Called when a SYN packet creates a new logical connection, i.e.
  * is not a join request.
  */
-int mptcp_token_new_accept(u32 token, struct sock *conn)
+void mptcp_token_accept(struct mptcp_subflow_request_sock *req,
+                       struct mptcp_sock *msk)
 {
-       int err;
+       struct mptcp_subflow_request_sock *pos;
+       struct token_bucket *bucket;
 
-       spin_lock_bh(&token_tree_lock);
-       err = radix_tree_insert(&token_tree, token, conn);
-       spin_unlock_bh(&token_tree_lock);
+       bucket = token_bucket(req->token);
+       spin_lock_bh(&bucket->lock);
 
-       return err;
+       /* pedantic lookup check for the moved token */
+       pos = __token_lookup_req(bucket, req->token);
+       if (!WARN_ON_ONCE(pos != req))
+               hlist_nulls_del_init_rcu(&req->token_node);
+       __sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain);
+       spin_unlock_bh(&bucket->lock);
 }
 
 /**
  */
 struct mptcp_sock *mptcp_token_get_sock(u32 token)
 {
-       struct sock *conn;
-
-       spin_lock_bh(&token_tree_lock);
-       conn = radix_tree_lookup(&token_tree, token);
-       if (conn) {
-               /* token still reserved? */
-               if (conn == (struct sock *)&token_used)
-                       conn = NULL;
-               else
-                       sock_hold(conn);
+       struct hlist_nulls_node *pos;
+       struct token_bucket *bucket;
+       struct mptcp_sock *msk;
+       struct sock *sk;
+
+       rcu_read_lock();
+       bucket = token_bucket(token);
+
+again:
+       sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {
+               msk = mptcp_sk(sk);
+               if (READ_ONCE(msk->token) != token)
+                       continue;
+               if (!refcount_inc_not_zero(&sk->sk_refcnt))
+                       goto not_found;
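+               /* the msk slab is SLAB_TYPESAFE_BY_RCU, so the socket may
+                * have been reused for another connection between the chain
+                * walk and taking the reference: verify the token again
+                */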
+               if (READ_ONCE(msk->token) != token) {
+                       sock_put(sk);
+                       goto again;
+               }
+               goto found;
        }
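+       /* the nulls value at the end of each chain encodes the bucket index
+        * (see mptcp_token_init()); ending on a different value means the
+        * last visited socket moved while we walked the chain, so restart
+        */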
-       spin_unlock_bh(&token_tree_lock);
+       if (get_nulls_value(pos) != (token & token_mask))
+               goto again;
+
+not_found:
+       msk = NULL;
 
-       return mptcp_sk(conn);
+found:
+       rcu_read_unlock();
+       return msk;
 }
 
 /**
  * mptcp_token_destroy_request - remove mptcp connection/token
- * @token: token of mptcp connection to remove
+ * @req: mptcp request socket dropping the token
  *
- * Remove not-yet-fully-established incoming connection identified
- * by @token.
+ * Remove the token associated with @req.
  */
-void mptcp_token_destroy_request(u32 token)
+void mptcp_token_destroy_request(struct request_sock *req)
 {
-       spin_lock_bh(&token_tree_lock);
-       radix_tree_delete(&token_req_tree, token);
-       spin_unlock_bh(&token_tree_lock);
+       struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+       struct mptcp_subflow_request_sock *pos;
+       struct token_bucket *bucket;
+
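+       /* skip requests that are not, or no longer, hashed in the token table */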
+       if (hlist_nulls_unhashed(&subflow_req->token_node))
+               return;
+
+       bucket = token_bucket(subflow_req->token);
+       spin_lock_bh(&bucket->lock);
+       pos = __token_lookup_req(bucket, subflow_req->token);
+       if (!WARN_ON_ONCE(pos != subflow_req)) {
+               hlist_nulls_del_init_rcu(&pos->token_node);
+               bucket->chain_len--;
+       }
+       spin_unlock_bh(&bucket->lock);
 }
 
 /**
  * mptcp_token_destroy - remove mptcp connection/token
- * @token: token of mptcp connection to remove
+ * @msk: mptcp connection dropping the token
  *
- * Remove the connection identified by @token.
+ * Remove the token associated with @msk.
  */
-void mptcp_token_destroy(u32 token)
+void mptcp_token_destroy(struct mptcp_sock *msk)
 {
-       spin_lock_bh(&token_tree_lock);
-       radix_tree_delete(&token_tree, token);
-       spin_unlock_bh(&token_tree_lock);
+       struct token_bucket *bucket;
+       struct mptcp_sock *pos;
+
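+       /* skip msks that are not, or no longer, present in the token hash */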
+       if (sk_unhashed((struct sock *)msk))
+               return;
+
+       bucket = token_bucket(msk->token);
+       spin_lock_bh(&bucket->lock);
+       pos = __token_lookup_msk(bucket, msk->token);
+       if (!WARN_ON_ONCE(pos != msk)) {
+               __sk_nulls_del_node_init_rcu((struct sock *)pos);
+               bucket->chain_len--;
+       }
+       spin_unlock_bh(&bucket->lock);
+}
+
+void __init mptcp_token_init(void)
+{
+       int i;
+
+       token_hash = alloc_large_system_hash("MPTCP token",
+                                            sizeof(struct token_bucket),
+                                            0,
+                                            20, /* one slot per 1MB of memory */
+                                            0,
+                                            NULL,
+                                            &token_mask,
+                                            0,
+                                            64 * 1024);
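+       /* seed each nulls chain with its bucket index so that lockless
+        * lookups can tell when a traversal drifted to another bucket
+        */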
+       for (i = 0; i < token_mask + 1; ++i) {
+               INIT_HLIST_NULLS_HEAD(&token_hash[i].req_chain, i);
+               INIT_HLIST_NULLS_HEAD(&token_hash[i].msk_chain, i);
+               spin_lock_init(&token_hash[i].lock);
+       }
 }