 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
 #endif
 
-struct cfs_wi_sched *cfs_sched_rehash;
+struct workqueue_struct *cfs_rehash_wq;
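
Context note: queue_work() in the hunks below targets this new global
workqueue, so it has to be created at module init and torn down at module
exit. A minimal sketch of that lifecycle; the queue name, flags, and helper
names are illustrative assumptions, not taken from this patch:

        #include <linux/workqueue.h>

        extern struct workqueue_struct *cfs_rehash_wq;

        /* hypothetical init-time helper */
        static int cfs_rehash_wq_start(void)
        {
                cfs_rehash_wq = alloc_workqueue("cfs_rehash", WQ_SYSFS, 4);
                return cfs_rehash_wq ? 0 : -ENOMEM;
        }

        /* hypothetical exit-time helper */
        static void cfs_rehash_wq_stop(void)
        {
                /* drains queued and running work before freeing the queue */
                destroy_workqueue(cfs_rehash_wq);
                cfs_rehash_wq = NULL;
        }
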
 
 static inline void
 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
        hs->hs_dep_bits = hs->hs_cur_bits;
        spin_unlock(&hs->hs_dep_lock);
 
-       cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
+       queue_work(cfs_rehash_wq, &hs->hs_dep_work);
 # endif
 }
 
  * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
  *          - CFS_HASH_SORT enable chained hash sort
  */
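
For reference, the background resize path being converted here only runs for
tables created with CFS_HASH_REHASH set. A minimal creation sketch; the
cfs_hash_create() argument order and the theta macros are assumed from
libcfs_hash.h, and my_hash_ops is hypothetical:

        /* Sketch: a table allowed to grow from 2^5 to 2^12 buckets and to
         * resize itself in the background. */
        struct cfs_hash *hs;

        hs = cfs_hash_create("demo", 5, 12,      /* name, cur_bits, max_bits */
                             3, 0,               /* bkt_bits, extra bytes */
                             CFS_HASH_MIN_THETA, /* assumed theta macros */
                             CFS_HASH_MAX_THETA,
                             &my_hash_ops,
                             CFS_HASH_REHASH | CFS_HASH_SORT);
        if (!hs)
                return -ENOMEM;
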
-static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
+static void cfs_hash_rehash_worker(struct work_struct *work);
 
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(struct cfs_workitem *wi)
+static void cfs_hash_dep_print(struct work_struct *work)
 {
-       struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
+       struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
        int dep;
        int bkt;
        int off;
 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
 {
        spin_lock_init(&hs->hs_dep_lock);
-       cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
+       INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
 }
 
 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
 {
-       if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
-               return;
-
-       spin_lock(&hs->hs_dep_lock);
-       while (hs->hs_dep_bits) {
-               spin_unlock(&hs->hs_dep_lock);
-               cond_resched();
-               spin_lock(&hs->hs_dep_lock);
-       }
-       spin_unlock(&hs->hs_dep_lock);
+       cancel_work_sync(&hs->hs_dep_work);
 }
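
The depth-warning path above now follows the stock workqueue lifecycle:
INIT_WORK() binds the handler, queue_work() schedules it, and
cancel_work_sync() waits for any running instance. A stand-alone sketch of
that pattern; the demo type, handler, and queue are hypothetical, not libcfs
code:

        #include <linux/workqueue.h>

        struct demo {                           /* hypothetical container */
                struct work_struct work;
                int depth;
        };

        static void demo_worker(struct work_struct *work)
        {
                struct demo *d = container_of(work, struct demo, work);

                pr_info("demo: depth=%d\n", d->depth);
        }

        static void demo_usage(struct workqueue_struct *wq, struct demo *d)
        {
                INIT_WORK(&d->work, demo_worker);
                d->depth = 42;
                queue_work(wq, &d->work);   /* async, like cfs_hash_dep_print() */
                cancel_work_sync(&d->work); /* returns once the handler is idle */
        }

Because cancel_work_sync() already waits for a running handler to finish, the
hand-rolled hs_dep_bits polling loop removed above is no longer needed.
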
 
 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
        hs->hs_ops = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
-       cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
+       INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
        cfs_hash_depth_wi_init(hs);
 
        if (cfs_hash_with_rehash(hs))
 
        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;
+       cfs_hash_unlock(hs, 1);
 
        /* NB: iteration is mostly called by service threads; rather than
         * blocking a service thread, we cancel any pending rehash request
         * here and relaunch it after the iteration completes.
         */
        if (cfs_hash_is_rehashing(hs))
-               cfs_hash_rehash_cancel_locked(hs);
-       cfs_hash_unlock(hs, 1);
+               cfs_hash_rehash_cancel(hs);
 }
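
The reordering above (bump hs_iterators under the lock, drop the lock, then
cancel) is what allows cfs_hash_rehash_cancel() to use cancel_work_sync():
the cancel can sleep until cfs_hash_rehash_worker() finishes, and that worker
takes cfs_hash_lock(hs, 1) itself, so issuing the cancel while still holding
the exclusive lock could deadlock. A sketch of the anti-pattern being
avoided, for illustration only:

        /* Do NOT do this: waiting for the worker while holding the lock
         * the worker itself needs. */
        cfs_hash_lock(hs, 1);
        cancel_work_sync(&hs->hs_rehash_work);  /* may block forever */
        cfs_hash_unlock(hs, 1);
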
 
 static void
  * this approach assumes a reasonably uniform hashing function.  The
  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
  */
-void
-cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
-{
-       int i;
-
-       /* need hold cfs_hash_lock(hs, 1) */
-       LASSERT(cfs_hash_with_rehash(hs) &&
-               !cfs_hash_with_no_lock(hs));
-
-       if (!cfs_hash_is_rehashing(hs))
-               return;
-
-       if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
-               hs->hs_rehash_bits = 0;
-               return;
-       }
-
-       for (i = 2; cfs_hash_is_rehashing(hs); i++) {
-               cfs_hash_unlock(hs, 1);
-               /* raise console warning while waiting too long */
-               CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
-                      "hash %s is still rehashing, rescheded %d\n",
-                      hs->hs_name, i - 1);
-               cond_resched();
-               cfs_hash_lock(hs, 1);
-       }
-}
-
 void
 cfs_hash_rehash_cancel(struct cfs_hash *hs)
 {
-       cfs_hash_lock(hs, 1);
-       cfs_hash_rehash_cancel_locked(hs);
-       cfs_hash_unlock(hs, 1);
+       LASSERT(cfs_hash_with_rehash(hs));
+       cancel_work_sync(&hs->hs_rehash_work);
 }
 
-int
+void
 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 {
        int rc;
        rc = cfs_hash_rehash_bits(hs);
        if (rc <= 0) {
                cfs_hash_unlock(hs, 1);
-               return rc;
+               return;
        }
 
        hs->hs_rehash_bits = rc;
        if (!do_rehash) {
                /* launch and return */
-               cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
+               queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
                cfs_hash_unlock(hs, 1);
-               return 0;
+               return;
        }
 
        /* rehash right now */
        cfs_hash_unlock(hs, 1);
 
-       return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
+       cfs_hash_rehash_worker(&hs->hs_rehash_work);
 }
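
cfs_hash_rehash() now returns void, since the status it used to report is no
longer produced. A hypothetical call-site sketch showing the two modes:

        /* Ask for an asynchronous resize check; passing 1 instead performs
         * any needed rehash synchronously in the caller's context. */
        cfs_hash_rehash(hs, 0);
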
 
 static int
        return c;
 }
 
-static int
-cfs_hash_rehash_worker(struct cfs_workitem *wi)
+static void
+cfs_hash_rehash_worker(struct work_struct *work)
 {
-       struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
+       struct cfs_hash *hs = container_of(work, struct cfs_hash,
+                                           hs_rehash_work);
        struct cfs_hash_bucket **bkts;
        struct cfs_hash_bd bd;
        unsigned int old_size;
        hs->hs_cur_bits = hs->hs_rehash_bits;
 out:
        hs->hs_rehash_bits = 0;
-       if (rc == -ESRCH) /* never be scheduled again */
-               cfs_wi_exit(cfs_sched_rehash, wi);
        bsize = cfs_hash_bkt_size(hs);
        cfs_hash_unlock(hs, 1);
        /* can't refer to @hs anymore because it could be destroyed */
                cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
        if (rc)
                CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
-       /* return 1 only if cfs_wi_exit is called */
-       return rc == -ESRCH;
 }
 
 /**