                unlock_buckets(new_tbl, old_tbl, new_hash);
        }
 
-       /* Publish the new table pointer. Lookups may now traverse
-        * the new table, but they will not benefit from any
-        * additional efficiency until later steps unzip the buckets.
-        */
-       rcu_assign_pointer(ht->tbl, new_tbl);
-
        /* Unzip interleaved hash chains */
        while (!complete && !ht->being_destroyed) {
                /* Wait for readers. All new readers will see the new
                }
        }
 
+       rcu_assign_pointer(ht->tbl, new_tbl);
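+
+       /* ht->tbl now points at the new table; wait for readers that
+        * may still be traversing the old table before it is freed.
+        */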
        synchronize_rcu();
 
        bucket_table_free(old_tbl);
 {
        struct bucket_table *tbl, *new_tbl, *old_tbl;
        struct rhash_head __rcu **pprev;
-       struct rhash_head *he;
+       struct rhash_head *he, *he2;
        unsigned int hash, new_hash;
        bool ret = false;
 
        rcu_read_lock();
        tbl = old_tbl = rht_dereference_rcu(ht->tbl, ht);
        new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-       new_hash = head_hashfn(ht, new_tbl, obj);
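+       /* The raw hash is independent of either table's size. */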
+       new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
        lock_buckets(new_tbl, old_tbl, new_hash);
 restart:
                }
 
                ASSERT_BUCKET_LOCK(ht, tbl, hash);
-               rcu_assign_pointer(*pprev, obj->next);
 
+               if (unlikely(new_tbl != tbl)) {
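+                       /* A resize is in progress, so the chain may be
+                        * interleaved with entries belonging to a
+                        * different bucket.  Relink the predecessor to
+                        * the next entry that still hashes to this
+                        * bucket rather than blindly taking obj->next.
+                        */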
+                       rht_for_each_continue(he2, he->next, tbl, hash) {
+                               if (head_hashfn(ht, tbl, he2) == hash) {
+                                       rcu_assign_pointer(*pprev, he2);
+                                       goto found;
+                               }
+                       }
+
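+                       /* No remaining entry hashes to this bucket;
+                        * terminate the chain with its nulls marker.
+                        */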
+                       INIT_RHT_NULLS_HEAD(*pprev, ht, hash);
+               } else {
+                       rcu_assign_pointer(*pprev, obj->next);
+               }
+
+found:
                ret = true;
                break;
        }