        spin_unlock(&xprt->queue_lock);
 }
 
-static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
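+/*
+ * rpc_action callback run when a backlogged task is woken: if the task
+ * was handed a recycled rqst while it slept, finish initializing it.
+ */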
+static void xprt_complete_request_init(struct rpc_task *task)
+{
+       if (task->tk_rqstp)
+               xprt_request_init(task);
+}
+
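+/**
+ * xprt_add_backlog - Park an RPC task on the transport's backlog queue
+ * @xprt: transport with no free rqst slots
+ * @task: RPC task to put to sleep until a slot is released
+ */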
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        set_bit(XPRT_CONGESTED, &xprt->state);
-       rpc_sleep_on(&xprt->backlog, task, NULL);
+       rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
 }
+EXPORT_SYMBOL_GPL(xprt_add_backlog);
 
 static bool __xprt_set_rq(struct rpc_task *task, void *data)
 {
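+       /* Wake function passed to rpc_wake_up_first(): hand the released
+        * rqst (@data) to the first backlogged task that does not already
+        * hold one.
+        */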
 
        struct rpc_rqst *req = data;

        if (task->tk_rqstp == NULL) {
                memset(req, 0, sizeof(*req));   /* mark unused */
-               task->tk_status = -EAGAIN;
                task->tk_rqstp = req;
                return true;
        }
        return false;
 }
 
-static bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
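+/**
+ * xprt_wake_up_backlog - Hand a released rqst to a task on the backlog queue
+ * @xprt: transport whose backlog queue should be woken
+ * @req: rqst that its previous owner has just released
+ *
+ * Return: true if a backlogged task took ownership of @req; false if no
+ * task was waiting and the caller must release @req itself.
+ */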
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
        if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
                clear_bit(XPRT_CONGESTED, &xprt->state);
                return false;
        }
        return true;
 }
+EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
 
 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        bool ret = false;

        if (!test_bit(XPRT_CONGESTED, &xprt->state))
                goto out;
        spin_lock(&xprt->reserve_lock);
        if (test_bit(XPRT_CONGESTED, &xprt->state)) {
-               rpc_sleep_on(&xprt->backlog, task, NULL);
+               xprt_add_backlog(xprt, task);
                ret = true;
        }
        spin_unlock(&xprt->reserve_lock);
 out:
        return ret;
 }
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req = task->tk_rqstp;
 
-       if (req->rq_task)
-               /* Already initialized */
-               return;
-
        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
        struct rpc_xprt *xprt = task->tk_xprt;
 
        task->tk_status = 0;
-       if (task->tk_rqstp != NULL) {
-               xprt_request_init(task);
+       if (task->tk_rqstp != NULL)
                return;
-       }
 
        task->tk_status = -EAGAIN;
        xprt_do_reserve(xprt, task);
        }
 
        xprt = req->rq_xprt;
-       if (xprt) {
-               xprt_request_dequeue_xprt(task);
-               spin_lock(&xprt->transport_lock);
-               xprt->ops->release_xprt(xprt, task);
-               if (xprt->ops->release_request)
-                       xprt->ops->release_request(task);
-               xprt_schedule_autodisconnect(xprt);
-               spin_unlock(&xprt->transport_lock);
-               if (req->rq_buffer)
-                       xprt->ops->buf_free(task);
-               xdr_free_bvec(&req->rq_rcv_buf);
-               xdr_free_bvec(&req->rq_snd_buf);
-               if (req->rq_cred != NULL)
-                       put_rpccred(req->rq_cred);
-               if (req->rq_release_snd_buf)
-                       req->rq_release_snd_buf(req);
-       } else
-               xprt = task->tk_xprt;
+       xprt_request_dequeue_xprt(task);
+       spin_lock(&xprt->transport_lock);
+       xprt->ops->release_xprt(xprt, task);
+       if (xprt->ops->release_request)
+               xprt->ops->release_request(task);
+       xprt_schedule_autodisconnect(xprt);
+       spin_unlock(&xprt->transport_lock);
+       if (req->rq_buffer)
+               xprt->ops->buf_free(task);
+       xdr_free_bvec(&req->rq_rcv_buf);
+       xdr_free_bvec(&req->rq_snd_buf);
+       if (req->rq_cred != NULL)
+               put_rpccred(req->rq_cred);
+       if (req->rq_release_snd_buf)
+               req->rq_release_snd_buf(req);
 
        task->tk_rqstp = NULL;
        if (likely(!bc_prealloc(req)))
 
        return;
 
 out_sleep:
-       set_bit(XPRT_CONGESTED, &xprt->state);
-       rpc_sleep_on(&xprt->backlog, task, NULL);
        task->tk_status = -EAGAIN;
+       xprt_add_backlog(xprt, task);
 }
 
 /**
  * xprt_rdma_free - release resources allocated by xprt_rdma_allocate
  * @task: RPC task
  */
 static void
 xprt_rdma_free(struct rpc_task *task)
 {
        struct rpc_rqst *rqst = task->tk_rqstp;
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_xprt *r_xprt =
                container_of(xprt, struct rpcrdma_xprt, rx_xprt);
 
-       memset(rqst, 0, sizeof(*rqst));
-       rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
-       if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
-               clear_bit(XPRT_CONGESTED, &xprt->state);
+       rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
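+       /* If no backlogged task takes ownership of rqst, retire it and
+        * return the rpcrdma_req to the send-buffer pool ourselves.
+        */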
+       if (!xprt_wake_up_backlog(xprt, rqst)) {
+               memset(rqst, 0, sizeof(*rqst));
+               rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+       }
 }
 
 static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
 
        return mr;
 }
 
+/**
+ * rpcrdma_reply_put - Put reply buffers back into pool
+ * @buffers: buffer pool
+ * @req: object to return
+ *
+ */
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
+{
+       if (req->rl_reply) {
+               rpcrdma_rep_put(buffers, req->rl_reply);
+               req->rl_reply = NULL;
+       }
+}
+
 /**
  * rpcrdma_buffer_put - Put request/reply buffers back into pool
  * @buffers: buffer pool
  * @req: object to return
  */
 void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
 {
-       if (req->rl_reply)
-               rpcrdma_rep_put(buffers, req->rl_reply);
-       req->rl_reply = NULL;
+       rpcrdma_reply_put(buffers, req);
 
        spin_lock(&buffers->rb_lock);
        list_add(&req->rl_list, &buffers->rb_send_bufs);