        return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
 }
 
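+/* Wake up an ldlm_bl thread sleeping in ldlm_bl_thread_main() */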
+int ldlm_bl_thread_wakeup(void)
+{
+       wake_up(&ldlm_state->ldlm_bl_pool->blp_waitq);
+       return 0;
+}
+
 /* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
 static int ldlm_handle_setinfo(struct ptlrpc_request *req)
 {
        return 0;
 }
 
-static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
+static int ldlm_bl_get_work(struct ldlm_bl_pool *blp,
+                           struct ldlm_bl_work_item **p_blwi,
+                           struct obd_export **p_exp)
 {
+       int num_th = atomic_read(&blp->blp_num_threads);
        struct ldlm_bl_work_item *blwi = NULL;
        static unsigned int num_bl;
 
                                          blwi_entry);
 
        if (blwi) {
-               if (++num_bl >= atomic_read(&blp->blp_num_threads))
+               if (++num_bl >= num_th)
                        num_bl = 0;
                list_del(&blwi->blwi_entry);
        }
        spin_unlock(&blp->blp_lock);
+       *p_blwi = blwi;
 
-       return blwi;
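+       /* return nonzero while there is still work for the caller */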
+       return (*p_blwi || *p_exp) ? 1 : 0;
 }
 
 /* This only contains temporary data until the thread starts */
        return 0;
 }
 
+/* Not fatal if racy and we end up with a few too many threads */
+static int ldlm_bl_thread_need_create(struct ldlm_bl_pool *blp,
+                                     struct ldlm_bl_work_item *blwi)
+{
+       int busy = atomic_read(&blp->blp_busy_threads);
+
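+       /* already running the maximum number of threads */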
+       if (busy >= blp->blp_max_threads)
+               return 0;
+
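+       /* at least one existing thread is idle, no new thread needed */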
+       if (busy < atomic_read(&blp->blp_num_threads))
+               return 0;
+
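+       /* don't create a thread for a cleanup item or under memory pressure */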
+       if (blwi && (!blwi->blwi_ns || blwi->blwi_mem_pressure))
+               return 0;
+
+       return 1;
+}
+
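+/*
+ * Process one blocking work item: either a list of lru locks to
+ * cancel or a single blocking callback to handle.
+ */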
+static int ldlm_bl_thread_blwi(struct ldlm_bl_pool *blp,
+                              struct ldlm_bl_work_item *blwi)
+{
+       if (!blwi->blwi_ns)
+               /* added by ldlm_cleanup() */
+               return LDLM_ITER_STOP;
+
+       if (blwi->blwi_mem_pressure)
+               memory_pressure_set();
+
+       OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL2, 4);
+
+       if (blwi->blwi_count) {
+               int count;
+
+               /*
+                * This is the special case when we cancel locks in LRU
+                * asynchronously: we pass the list of locks here.
+                * The locks are marked LDLM_FL_CANCELING, but NOT
+                * canceled locally yet.
+                */
+               count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
+                                                  blwi->blwi_count,
+                                                  LCF_BL_AST);
+               ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
+                                    blwi->blwi_flags);
+       } else {
+               ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
+                                       blwi->blwi_lock);
+       }
+       if (blwi->blwi_mem_pressure)
+               memory_pressure_clr();
+
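+       /* async callers do not wait on blwi_comp, so free the item here */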
+       if (blwi->blwi_flags & LCF_ASYNC)
+               kfree(blwi);
+       else
+               complete(&blwi->blwi_comp);
+
+       return 0;
+}
+
 /**
  * Main blocking requests processing thread.
  *
 static int ldlm_bl_thread_main(void *arg)
 {
        struct ldlm_bl_pool *blp;
+       struct ldlm_bl_thread_data *bltd = arg;
 
-       {
-               struct ldlm_bl_thread_data *bltd = arg;
-
-               blp = bltd->bltd_blp;
+       blp = bltd->bltd_blp;
 
-               atomic_inc(&blp->blp_num_threads);
-               atomic_inc(&blp->blp_busy_threads);
+       atomic_inc(&blp->blp_num_threads);
+       atomic_inc(&blp->blp_busy_threads);
 
-               complete(&bltd->bltd_comp);
-               /* cannot use bltd after this, it is only on caller's stack */
-       }
+       complete(&bltd->bltd_comp);
+       /* cannot use bltd after this; it is only on the caller's stack */
 
        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;
-               int busy;
+               struct obd_export *exp = NULL;
+               int rc;
 
-               blwi = ldlm_bl_get_work(blp);
-
-               if (!blwi) {
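+               /* grab work if available, otherwise sleep until some arrives */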
+               rc = ldlm_bl_get_work(blp, &blwi, &exp);
+               if (!rc) {
                        atomic_dec(&blp->blp_busy_threads);
                        l_wait_event_exclusive(blp->blp_waitq,
-                                              (blwi = ldlm_bl_get_work(blp)),
+                                              ldlm_bl_get_work(blp, &blwi,
+                                                               &exp),
                                               &lwi);
-                       busy = atomic_inc_return(&blp->blp_busy_threads);
-               } else {
-                       busy = atomic_read(&blp->blp_busy_threads);
+                       atomic_inc(&blp->blp_busy_threads);
                }
 
-               if (!blwi->blwi_ns)
-                       /* added by ldlm_cleanup() */
-                       break;
-
-               /* Not fatal if racy and have a few too many threads */
-               if (unlikely(busy < blp->blp_max_threads &&
-                            busy >= atomic_read(&blp->blp_num_threads) &&
-                            !blwi->blwi_mem_pressure))
+               if (ldlm_bl_thread_need_create(blp, blwi))
                        /* discard the return value, we tried */
                        ldlm_bl_thread_start(blp);
 
-               if (blwi->blwi_mem_pressure)
-                       memory_pressure_set();
-
-               if (blwi->blwi_count) {
-                       int count;
-                       /* The special case when we cancel locks in LRU
-                        * asynchronously, we pass the list of locks here.
-                        * Thus locks are marked LDLM_FL_CANCELING, but NOT
-                        * canceled locally yet.
-                        */
-                       count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
-                                                          blwi->blwi_count,
-                                                          LCF_BL_AST);
-                       ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
-                                            blwi->blwi_flags);
-               } else {
-                       ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
-                                               blwi->blwi_lock);
-               }
-               if (blwi->blwi_mem_pressure)
-                       memory_pressure_clr();
+               if (blwi)
+                       rc = ldlm_bl_thread_blwi(blp, blwi);
 
-               if (blwi->blwi_flags & LCF_ASYNC)
-                       kfree(blwi);
-               else
-                       complete(&blwi->blwi_comp);
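+               /* a NULL-namespace item from ldlm_cleanup() stops the thread */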
+               if (rc == LDLM_ITER_STOP)
+                       break;
        }
 
        atomic_dec(&blp->blp_busy_threads);