        }
 }
 
-void
-rpcrdma_conn_func(struct rpcrdma_ep *ep)
-{
-       schedule_delayed_work(&ep->rep_connect_worker, 0);
-}
-
-void
-rpcrdma_connect_worker(struct work_struct *work)
+/**
+ * xprt_rdma_connect_worker - establish connection in the background
+ * @work: worker thread context
+ *
+ * Requester holds the xprt's send lock to prevent activity on this
+ * transport while a fresh connection is being established. RPC tasks
+ * sleep on the xprt's pending queue waiting for connect to complete.
+ */
+static void
+xprt_rdma_connect_worker(struct work_struct *work)
 {
-       struct rpcrdma_ep *ep =
-               container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
-       struct rpcrdma_xprt *r_xprt =
-               container_of(ep, struct rpcrdma_xprt, rx_ep);
+       struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
+                                                  rx_connect_worker.work);
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+       int rc;
 
-       spin_lock_bh(&xprt->transport_lock);
-       if (ep->rep_connected > 0) {
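+       /* Connect in process context. The requester holds the xprt's
+        * send lock (see above), which keeps other transport activity
+        * off this xprt while the attempt runs.
+        */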
+       rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
+       xprt_clear_connecting(xprt);
+       if (r_xprt->rx_ep.rep_connected > 0) {
                if (!xprt_test_and_set_connected(xprt)) {
                        xprt->stat.connect_count++;
                        xprt->stat.connect_time += (long)jiffies -
                                                   xprt->stat.connect_start;
-                       xprt_wake_pending_tasks(xprt, 0);
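+                       /* Wake RPCs sleeping on the pending queue
+                        * with -EAGAIN so they retry on the fresh
+                        * connection.
+                        */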
+                       xprt_wake_pending_tasks(xprt, -EAGAIN);
                }
        } else {
                if (xprt_test_and_clear_connected(xprt))
-                       xprt_wake_pending_tasks(xprt, -ENOTCONN);
+                       xprt_wake_pending_tasks(xprt, rc);
        }
-       spin_unlock_bh(&xprt->transport_lock);
-}
-
-static void
-xprt_rdma_connect_worker(struct work_struct *work)
-{
-       struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
-                                                  rx_connect_worker.work);
-       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-       int rc = 0;
-
-       xprt_clear_connected(xprt);
-
-       rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
-       if (rc)
-               xprt_wake_pending_tasks(xprt, rc);
-
-       xprt_clear_connecting(xprt);
 }
 
 static void
 
        cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
 
-       xprt_clear_connected(xprt);
-
        rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
        rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ia_close(&r_xprt->rx_ia);
 
        }
 }
 
+/**
+ * rpcrdma_disconnect_worker - Force a disconnect
+ * @work: work item embedded in the endpoint to be disconnected
+ *
+ * Provider callbacks can run in an IRQ context. This function
+ * is invoked in a worker thread to guarantee that disconnect wake-up
+ * calls are always done in process context.
+ */
+static void
+rpcrdma_disconnect_worker(struct work_struct *work)
+{
+       struct rpcrdma_ep *ep = container_of(work, struct rpcrdma_ep,
+                                            rep_disconnect_worker.work);
+       struct rpcrdma_xprt *r_xprt =
+               container_of(ep, struct rpcrdma_xprt, rx_ep);
+
+       xprt_force_disconnect(&r_xprt->rx_xprt);
+}
+
 static void
 rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
 {
 
        if (ep->rep_connected == 1) {
                ep->rep_connected = -EIO;
-               rpcrdma_conn_func(ep);
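+               /* This upcall can run in an IRQ context; defer the
+                * force-disconnect to rpcrdma_disconnect_worker so the
+                * wake-up happens in process context.
+                */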
+               schedule_delayed_work(&ep->rep_disconnect_worker, 0);
                wake_up_all(&ep->rep_connect_wait);
        }
 }
                ++xprt->connect_cookie;
                ep->rep_connected = 1;
                rpcrdma_update_connect_private(r_xprt, &event->param.conn);
-               goto connected;
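+               /* Wake rpcrdma_ep_connect(), which sleeps on this
+                * queue until rep_connected changes.
+                */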
+               wake_up_all(&ep->rep_connect_wait);
+               break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                ep->rep_connected = -ENOTCONN;
-               goto connected;
+               goto disconnected;
        case RDMA_CM_EVENT_UNREACHABLE:
                ep->rep_connected = -ENETUNREACH;
-               goto connected;
+               goto disconnected;
        case RDMA_CM_EVENT_REJECTED:
                dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
                        rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
                ep->rep_connected = -ECONNREFUSED;
                if (event->status == IB_CM_REJ_STALE_CONN)
                        ep->rep_connected = -EAGAIN;
-               goto connected;
+               goto disconnected;
        case RDMA_CM_EVENT_DISCONNECTED:
                ++xprt->connect_cookie;
                ep->rep_connected = -ECONNABORTED;
-connected:
-               rpcrdma_conn_func(ep);
+disconnected:
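+               /* CM event upcalls arrive in process context, so the
+                * transport can be force-disconnected directly here.
+                */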
+               xprt_force_disconnect(xprt);
                wake_up_all(&ep->rep_connect_wait);
                break;
        default:
                                   cdata->max_requests >> 2);
        ep->rep_send_count = ep->rep_send_batch;
        init_waitqueue_head(&ep->rep_connect_wait);
-       INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
+       INIT_DELAYED_WORK(&ep->rep_disconnect_worker,
+                         rpcrdma_disconnect_worker);
 
        sendcq = ib_alloc_cq(ia->ri_device, NULL,
                             ep->rep_attr.cap.max_send_wr + 1,
 void
 rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 {
-       cancel_delayed_work_sync(&ep->rep_connect_worker);
+       cancel_delayed_work_sync(&ep->rep_disconnect_worker);
 
        if (ia->ri_id && ia->ri_id->qp) {
                rpcrdma_ep_disconnect(ep, ia);
 {
        struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
                                                   rx_ia);
+       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        int rc;
 
 retry:
        }
 
        ep->rep_connected = 0;
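+       /* Moved here from the old connect worker: mark the xprt
+        * disconnected before a fresh connect attempt starts.
+        */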
+       xprt_clear_connected(xprt);
+
        rpcrdma_post_recvs(r_xprt, true);
 
        rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
 
        wait_queue_head_t       rep_connect_wait;
        struct rpcrdma_connect_private  rep_cm_private;
        struct rdma_conn_param  rep_remote_cma;
-       struct delayed_work     rep_connect_worker;
+       struct delayed_work     rep_disconnect_worker;
 };
 
 /* Pre-allocate extra Work Requests for handling backward receives
                                struct rpcrdma_create_data_internal *);
 void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
 int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
-void rpcrdma_conn_func(struct rpcrdma_ep *ep);
 void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
 
 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
 extern unsigned int xprt_rdma_max_inline_read;
 void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
 void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
-void rpcrdma_connect_worker(struct work_struct *work);
 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
 int xprt_rdma_init(void);
 void xprt_rdma_cleanup(void);