struct dlm_lock_resource *res);
 void dlm_clean_master_list(struct dlm_ctxt *dlm,
                           u8 dead_node);
+void dlm_force_free_mles(struct dlm_ctxt *dlm);
 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
 int __dlm_lockres_unused(struct dlm_lock_resource *res);
 
        wake_up(&res->wq);
        wake_up(&dlm->migration_wq);
 }
+
+void dlm_force_free_mles(struct dlm_ctxt *dlm)
+{
+       int i;
+       struct hlist_head *bucket;
+       struct dlm_master_list_entry *mle;
+       struct hlist_node *tmp, *list;
+
+       /*
+        * We have notified all other nodes that we are exiting the domain
+        * and have set the dlm state to DLM_CTXT_LEAVING. If any mles are
+        * still around, force free them and wake any processes that are
+        * waiting on them.
+        */
+       spin_lock(&dlm->spinlock);
+       spin_lock(&dlm->master_lock);
+
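+       /* The assertions below enforce the teardown invariants: we are
+        * still LEAVING and no node remains in the domain map. */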
+       BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
+       BUG_ON(find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES);
+
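+       /* Scan every master hash bucket. The _safe iterator is required
+        * because __dlm_unlink_mle() removes entries as we walk. */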
+       for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+               bucket = dlm_master_hash(dlm, i);
+               hlist_for_each_safe(list, tmp, bucket) {
+                       mle = hlist_entry(list, struct dlm_master_list_entry,
+                                         master_hash_node);
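+                       /* Only block mles should still exist this late
+                        * in teardown; log anything else as an error. */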
+                       if (mle->type != DLM_MLE_BLOCK) {
+                               mlog(ML_ERROR, "bad mle: %p\n", mle);
+                               dlm_print_one_mle(mle);
+                       }
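+                       /* Flag the mle as woken and wake any process
+                        * still blocked on its waitqueue. */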
+                       atomic_set(&mle->woken, 1);
+                       wake_up(&mle->wq);
+
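+                       /* Unhash the mle, detach it from heartbeat
+                        * events, and drop a reference so it can be
+                        * freed. */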
+                       __dlm_unlink_mle(dlm, mle);
+                       __dlm_mle_detach_hb_events(dlm, mle);
+                       __dlm_put_mle(mle);
+               }
+       }
+       spin_unlock(&dlm->master_lock);
+       spin_unlock(&dlm->spinlock);
+}
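
For context, a minimal sketch of the intended call site, assuming the
teardown ordering described in the function's opening comment (the
dlm_unregister_domain() hunk itself is not shown in this excerpt):

	dlm_mark_domain_leaving(dlm);	/* state -> DLM_CTXT_LEAVING */
	dlm_leave_domain(dlm);		/* notify all other nodes */
	dlm_force_free_mles(dlm);	/* reap any leftover mles */
	dlm_complete_dlm_shutdown(dlm);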