struct bucket_table {
        unsigned int            size;
        unsigned int            nest;
-       unsigned int            rehash;
        u32                     hash_rnd;
        unsigned int            locks_mask;
        spinlock_t              *locks;
  * @obj:       pointer to hash head inside object
  * @params:    hash table parameters
  *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
  * This lookup function may only be used for fixed key hash table (key_len
  * parameter set). It will BUG() if used inappropriately.
  *
  * @obj:       pointer to hash head inside object
  * @params:    hash table parameters
  *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
  * Lookups may occur in parallel with hashtable mutations and resizing.
  *
  * Will trigger an automatic deferred table resizing if residency in the
 
                return NULL;
        }
 
+       rcu_head_init(&tbl->rcu);
        INIT_LIST_HEAD(&tbl->walkers);
 
        tbl->hash_rnd = get_random_u32();
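
The rcu_head_init() call added here is what makes the later rcu_head_after_call_rcu() check in rhashtable_walk_stop() (further down in this patch) meaningful: it marks the table's rcu_head as "no free queued yet", and call_rcu() later replaces that mark with the actual callback. A minimal user-space sketch of that marker protocol, with illustrative names rather than the kernel API:

/* Hedged sketch of the marker protocol rcu_head_init() enables;
 * names and types are illustrative, not the kernel's. */
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

typedef void (*free_cb_t)(void *);

/* Stand-in for the "no free queued yet" state set by rcu_head_init(). */
static void cb_unset(void *p) { (void)p; }

struct free_marker {
        free_cb_t func;                 /* which callback, if any, was queued */
};

static void marker_init(struct free_marker *m)
{
        m->func = cb_unset;             /* analogue of rcu_head_init() */
}

static void queue_free(struct free_marker *m, free_cb_t cb)
{
        m->func = cb;                   /* analogue of call_rcu() recording its callback */
}

static bool free_was_queued(const struct free_marker *m, free_cb_t cb)
{
        return m->func == cb;           /* analogue of rcu_head_after_call_rcu() */
}

static void table_free(void *p) { free(p); }

int main(void)
{
        struct free_marker m;

        marker_init(&m);
        assert(!free_was_queued(&m, table_free));       /* table still live */
        queue_free(&m, table_free);
        assert(free_was_queued(&m, table_free));        /* free is in flight */
        return 0;
}

Without the init step the "was a free queued?" question has no well-defined answer, which is why the allocation path gains this one line.
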
        while (!(err = rhashtable_rehash_one(ht, old_hash)))
                ;
 
-       if (err == -ENOENT) {
-               old_tbl->rehash++;
+       if (err == -ENOENT)
                err = 0;
-       }
+
        spin_unlock_bh(old_bucket_lock);
 
        return err;
        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;
-       spin_unlock(&ht->lock);
 
        /* Wait for readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
+        * We do this inside the locked region so that
+        * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
+        * to check if it should not re-link the table.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
+       spin_unlock(&ht->lock);
 
        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
 }
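
Keeping call_rcu() inside the ht->lock region is what makes the rcu_head_after_call_rcu() test in rhashtable_walk_stop() race-free: both sides serialize on ht->lock, so a stopping walker either runs before the free has been queued and may safely re-link itself, or sees that the free is pending and leaves walker.tbl NULL. A hedged sketch of the same pattern with an explicit flag and a pthread mutex (illustrative only, not the kernel code):

/* The flag plays the role of rcu_head_after_call_rcu(), the mutex plays
 * the role of ht->lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_tbl {
        pthread_mutex_t lock;           /* stands in for ht->lock */
        bool free_queued;               /* written and read only under 'lock' */
};

/* Resize side: queue the free while still holding the lock, so the flag
 * flips atomically with respect to any walker that is stopping. */
static void retire_table(struct toy_tbl *t)
{
        pthread_mutex_lock(&t->lock);
        /* ... detach walkers, as list_for_each_entry() does above ... */
        t->free_queued = true;          /* analogue of call_rcu() inside the locked region */
        pthread_mutex_unlock(&t->lock);
}

/* Walker side: decide under the same lock whether re-linking is safe. */
static bool walker_may_relink(struct toy_tbl *t)
{
        bool ok;

        pthread_mutex_lock(&t->lock);
        ok = !t->free_queued;           /* analogue of !rcu_head_after_call_rcu() */
        pthread_mutex_unlock(&t->lock);
        return ok;
}

int main(void)
{
        struct toy_tbl t = { .lock = PTHREAD_MUTEX_INITIALIZER, .free_queued = false };

        printf("before retire: %d\n", walker_may_relink(&t));   /* 1: may re-link */
        retire_table(&t);
        printf("after retire:  %d\n", walker_may_relink(&t));   /* 0: must not */
        return 0;
}

The real code gets this flag for free from the table's rcu_head, which is why bucket_table_alloc() now initializes it.
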
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
        unsigned int hash;
-       spinlock_t *lock;
        void *data;
 
-       tbl = rcu_dereference(ht->tbl);
-
-       /* All insertions must grab the oldest table containing
-        * the hashed bucket that is yet to be rehashed.
-        */
-       for (;;) {
-               hash = rht_head_hashfn(ht, tbl, obj, ht->p);
-               lock = rht_bucket_lock(tbl, hash);
-               spin_lock_bh(lock);
-
-               if (tbl->rehash <= hash)
-                       break;
-
-               spin_unlock_bh(lock);
-               tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-       }
-
-       data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
-       new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
-       if (PTR_ERR(new_tbl) != -EEXIST)
-               data = ERR_CAST(new_tbl);
+       new_tbl = rcu_dereference(ht->tbl);
 
-       while (!IS_ERR_OR_NULL(new_tbl)) {
+       do {
                tbl = new_tbl;
                hash = rht_head_hashfn(ht, tbl, obj, ht->p);
-               spin_lock_nested(rht_bucket_lock(tbl, hash),
-                                SINGLE_DEPTH_NESTING);
+               spin_lock_bh(rht_bucket_lock(tbl, hash));
 
                data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
                new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
                if (PTR_ERR(new_tbl) != -EEXIST)
                        data = ERR_CAST(new_tbl);
 
-               spin_unlock(rht_bucket_lock(tbl, hash));
-       }
-
-       spin_unlock_bh(lock);
+               spin_unlock_bh(rht_bucket_lock(tbl, hash));
+       } while (!IS_ERR_OR_NULL(new_tbl));
 
        if (PTR_ERR(data) == -EAGAIN)
                data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
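
With the tbl->rehash counter gone, the insert path no longer hunts for the oldest table whose bucket has yet to be rehashed, and it no longer holds one bucket lock while taking another with SINGLE_DEPTH_NESTING: it starts at ht->tbl and follows future_tbl one table at a time, taking and releasing a single bucket lock per step, until rhashtable_insert_one() stops handing back a newer table. A toy, user-space sketch of that loop shape, using hypothetical helpers rather than the kernel ones:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_table {
        struct toy_table *next;         /* plays the role of future_tbl */
        bool rehashed;                  /* has this bucket already moved to 'next'? */
        const void *slot;               /* single-bucket stand-in for the hash table */
};

/* Returns the table to retry in, or NULL once the object is inserted,
 * mirroring how rhashtable_insert_one() hands back the future table. */
static struct toy_table *try_insert_here(struct toy_table *t, const void *obj)
{
        if (t->rehashed)
                return t->next;
        t->slot = obj;
        return NULL;
}

static void toy_insert(struct toy_table *first, const void *obj)
{
        struct toy_table *tbl, *next = first;

        do {
                tbl = next;
                /* lock(tbl, bucket) would go here: one table at a time, never nested */
                next = try_insert_here(tbl, obj);
                /* unlock(tbl, bucket) */
        } while (next);                 /* follow the chain until the insert lands */
}

int main(void)
{
        struct toy_table new_tbl = { .next = NULL, .rehashed = false, .slot = NULL };
        struct toy_table old_tbl = { .next = &new_tbl, .rehashed = true, .slot = NULL };
        int obj = 42;

        toy_insert(&old_tbl, &obj);
        printf("inserted into %s table\n", new_tbl.slot ? "new" : "old");
        return 0;
}
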
        ht = iter->ht;
 
        spin_lock(&ht->lock);
-       if (tbl->rehash < tbl->size)
-               list_add(&iter->walker.list, &tbl->walkers);
-       else
+       if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
+               /* This bucket table is being freed, don't re-link it. */
                iter->walker.tbl = NULL;
+       else
+               list_add(&iter->walker.list, &tbl->walkers);
        spin_unlock(&ht->lock);
 
 out: