#endif
 
 static void rpcrdma_reset_frmrs(struct rpcrdma_ia *);
+static void rpcrdma_reset_fmrs(struct rpcrdma_ia *);
 
 /*
  * internal functions
                rpcrdma_ep_disconnect(ep, ia);
                rpcrdma_flush_cqs(ep);
 
-               if (ia->ri_memreg_strategy == RPCRDMA_FRMR)
+               switch (ia->ri_memreg_strategy) {
+               case RPCRDMA_FRMR:
                        rpcrdma_reset_frmrs(ia);
+                       break;
+               case RPCRDMA_MTHCAFMR:
+                       rpcrdma_reset_fmrs(ia);
+                       break;
+               case RPCRDMA_ALLPHYSICAL:
+                       break;
+               default:
+                       rc = -EIO;
+                       goto out;
+               }
 
                xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
                id = rpcrdma_create_id(xprt, ia,
        kfree(buf->rb_pool);
 }
 
+/* After a disconnect, unmap all FMRs.
+ *
+ * This is invoked only in the transport connect worker in order
+ * to serialize with rpcrdma_register_fmr_external().
+ */
+static void
+rpcrdma_reset_fmrs(struct rpcrdma_ia *ia)
+{
+       struct rpcrdma_xprt *r_xprt =
+                               container_of(ia, struct rpcrdma_xprt, rx_ia);
+       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+       struct list_head *pos;
+       struct rpcrdma_mw *r;
+       LIST_HEAD(l);
+       int rc;
+
+       /* Walk every MW on rb_all and unmap its FMR. Each FMR is
+        * unmapped one at a time: the scratch list "l" is re-initialized
+        * on every iteration so ib_unmap_fmr() sees a single entry.
+        * NOTE(review): rb_all is walked without a lock here — presumably
+        * safe only because of the connect-worker serialization noted
+        * above; confirm no concurrent list mutation is possible.
+        */
+       list_for_each(pos, &buf->rb_all) {
+               r = list_entry(pos, struct rpcrdma_mw, mw_all);
+
+               INIT_LIST_HEAD(&l);
+               list_add(&r->r.fmr->list, &l);
+               rc = ib_unmap_fmr(&l);
+               /* An unmap failure is logged but not fatal; keep going
+                * so the remaining FMRs still get unmapped.
+                */
+               if (rc)
+                       dprintk("RPC:       %s: ib_unmap_fmr failed %i\n",
+                               __func__, rc);
+       }
+}
+
 /* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
  * an unusable state. Find FRMRs in this state and dereg / reg
  * each.  FRMRs that are VALID and attached to an rpcrdma_req are