int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                          u8 nodenum, u8 *real_master);
 
+void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res);
 
 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
                               struct dlm_lock_resource *res,
 
                mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done "
                        "but it is already derefed!\n", dlm->name,
                        res->lockname.len, res->lockname.name, node);
-               dlm_lockres_put(res);
                ret = 0;
                goto done;
        }
 
-       if (!list_empty(&res->purge)) {
-               mlog(0, "%s: Removing res %.*s from purgelist\n",
-                       dlm->name, res->lockname.len, res->lockname.name);
-               list_del_init(&res->purge);
-               dlm_lockres_put(res);
-               dlm->purge_count--;
-       }
-
-       if (!__dlm_lockres_unused(res)) {
-               mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
-                       dlm->name, res->lockname.len, res->lockname.name);
-               __dlm_print_one_lock_resource(res);
-               BUG();
-       }
-
-       __dlm_unhash_lockres(dlm, res);
-
-       spin_lock(&dlm->track_lock);
-       if (!list_empty(&res->tracking))
-               list_del_init(&res->tracking);
-       else {
-               mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
-                    dlm->name, res->lockname.len, res->lockname.name);
-               __dlm_print_one_lock_resource(res);
-       }
-       spin_unlock(&dlm->track_lock);
-
-       /* lockres is not in the hash now. drop the flag and wake up
-        * any processes waiting in dlm_get_lock_resource.
-        */
-       res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+       __dlm_do_purge_lockres(dlm, res);
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);
 
-       dlm_lockres_put(res);
-
        spin_unlock(&dlm->spinlock);
 
        ret = 0;
 done:
+       if (res)
+               dlm_lockres_put(res);
        dlm_put(dlm);
        return ret;
 }
 
        struct dlm_lock_resource *res;
        int i;
        struct hlist_head *bucket;
+       struct hlist_node *tmp;
        struct dlm_lock *lock;
 
 
         */
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
                bucket = dlm_lockres_hash(dlm, i);
-               hlist_for_each_entry(res, bucket, hash_node) {
+               hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
                        /* always prune any $RECOVERY entries for dead nodes,
                         * otherwise hangs can occur during later recovery */
                        if (dlm_is_recovery_lock(res->lockname.name,
                                                break;
                                        }
                                }
-                               dlm_lockres_clear_refmap_bit(dlm, res,
-                                               dead_node);
+
+                               if ((res->owner == dead_node) &&
+                                                       (res->state & DLM_LOCK_RES_DROPPING_REF)) {
+                                       dlm_lockres_get(res);
+                                       __dlm_do_purge_lockres(dlm, res);
+                                       spin_unlock(&res->spinlock);
+                                       wake_up(&res->wq);
+                                       dlm_lockres_put(res);
+                                       continue;
+                               } else if (res->owner == dlm->node_num)
+                                       dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
                                spin_unlock(&res->spinlock);
                                continue;
                        }
                                if (res->state & DLM_LOCK_RES_DROPPING_REF) {
                                        mlog(0, "%s:%.*s: owned by "
                                                "dead node %u, this node was "
-                                               "dropping its ref when it died. "
-                                               "continue, dropping the flag.\n",
+                                               "dropping its ref when master died. "
+                                               "continue, purging the lockres.\n",
                                                dlm->name, res->lockname.len,
                                                res->lockname.name, dead_node);
+                                       dlm_lockres_get(res);
+                                       __dlm_do_purge_lockres(dlm, res);
+                                       spin_unlock(&res->spinlock);
+                                       wake_up(&res->wq);
+                                       dlm_lockres_put(res);
+                                       continue;
                                }
-                               res->state &= ~DLM_LOCK_RES_DROPPING_REF;
-                               dlm_move_lockres_to_recovery_list(dlm,
-                                               res);
                        } else if (res->owner == dlm->node_num) {
                                dlm_free_dead_locks(dlm, res, dead_node);
                                __dlm_lockres_calc_usage(dlm, res);
 
        spin_unlock(&dlm->spinlock);
 }
 
+/*
+ * Do the real purge work:
+ *     remove the lockres from the purge list (if queued),
+ *     unhash the lockres, and
+ *     clear flag DLM_LOCK_RES_DROPPING_REF.
+ * Caller must hold both dlm->spinlock and res->spinlock.
+ */
+void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&dlm->spinlock);
+       assert_spin_locked(&res->spinlock);
+
+       if (!list_empty(&res->purge)) {
+               mlog(0, "%s: Removing res %.*s from purgelist\n",
+                    dlm->name, res->lockname.len, res->lockname.name);
+               list_del_init(&res->purge);
+               /* drop the reference the purge list was holding */
+               dlm_lockres_put(res);
+               dlm->purge_count--;
+       }
+
+       /* a lockres being purged must have no locks and no refs left;
+        * anything else means the refcounting went wrong somewhere */
+       if (!__dlm_lockres_unused(res)) {
+               mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
+                    dlm->name, res->lockname.len, res->lockname.name);
+               __dlm_print_one_lock_resource(res);
+               BUG();
+       }
+
+       __dlm_unhash_lockres(dlm, res);
+
+       /* remove from the debugfs tracking list; being absent from it
+        * here indicates earlier state corruption, so log loudly */
+       spin_lock(&dlm->track_lock);
+       if (!list_empty(&res->tracking))
+               list_del_init(&res->tracking);
+       else {
+               mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
+                    dlm->name, res->lockname.len, res->lockname.name);
+               __dlm_print_one_lock_resource(res);
+       }
+       spin_unlock(&dlm->track_lock);
+
+       /*
+        * lockres is not in the hash now. drop the flag and wake up
+        * any processes waiting in dlm_get_lock_resource.
+        */
+       res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+}
+
 static void dlm_purge_lockres(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res)
 {
 
        if (!master) {
                if (res->state & DLM_LOCK_RES_DROPPING_REF) {
-                       mlog(ML_NOTICE, "%s: res %.*s already in "
-                               "DLM_LOCK_RES_DROPPING_REF state\n",
-                               dlm->name, res->lockname.len,
-                               res->lockname.name);
+                       mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
+                               dlm->name, res->lockname.len, res->lockname.name);
                        spin_unlock(&res->spinlock);
                        return;
                }