RDS: IB: Add proxy qp to support FRWR through RDS_GET_MR
author     Avinash Repaka <avinash.repaka@oracle.com>
           Thu, 13 Apr 2017 01:00:05 +0000 (18:00 -0700)
committer  Chuck Anderson <chuck.anderson@oracle.com>
           Tue, 19 Sep 2017 06:18:09 +0000 (23:18 -0700)
MR registration requested through the RDS_GET_MR socket option does not carry
any connection details, so there is no appropriate qp on which to post the
registration/invalidation requests. This patch solves that issue by posting
them on a dedicated proxy qp.
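
For context, this is roughly how the path is reached from userspace: a buffer
is registered with setsockopt() on an RDS socket that has no RDMA connection
yet, which is exactly the case the proxy qp covers. Below is a minimal,
hypothetical sketch (the bind address is a placeholder and error handling is
trimmed; it is not part of this patch):

/* Hypothetical userspace sketch: register a buffer via RDS_GET_MR.  With
 * this patch, the resulting FRWR registration is posted on the per-device
 * proxy qp, since the socket has no connection to borrow a qp from.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/rds.h>

#ifndef AF_RDS
#define AF_RDS 21       /* matches the kernel's AF_RDS/PF_RDS value */
#endif

int main(void)
{
        static char buf[8192];
        uint64_t cookie = 0;
        struct rds_get_mr_args args;
        struct sockaddr_in sin = { .sin_family = AF_INET };
        int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);

        /* RDS_GET_MR requires a bound socket; 192.0.2.1 stands in for a
         * local IB-backed address, and port 0 lets RDS pick a port.
         */
        sin.sin_addr.s_addr = inet_addr("192.0.2.1");
        if (fd < 0 || bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
                return 1;

        memset(&args, 0, sizeof(args));
        args.vec.addr = (uint64_t)(uintptr_t)buf;        /* region to register */
        args.vec.bytes = sizeof(buf);
        args.cookie_addr = (uint64_t)(uintptr_t)&cookie; /* rdma cookie lands here */

        if (setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args)) < 0)
                return 1;

        printf("registered %zu bytes, rdma cookie 0x%llx\n",
               sizeof(buf), (unsigned long long)cookie);
        return 0;
}

Before this patch, the FRWR case in net/rds/ib_rdma.c below failed such a call
with -EOPNOTSUPP; with it, the registration work request goes to the proxy qp.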

Orabug: 25669255

Signed-off-by: Avinash Repaka <avinash.repaka@oracle.com>
Tested-by: Gerald Gibson <gerald.gibson@oracle.com>
Tested-by: Efrain Galaviz <efrain.galaviz@oracle.com>
Reviewed-by: Wei Lin Guay <wei.lin.guay@oracle.com>
net/rds/ib.c
net/rds/ib.h
net/rds/ib_cm.c
net/rds/ib_rdma.c

diff --git a/net/rds/ib.c b/net/rds/ib.c
index 0226f14d8b14ce17c9dcbdf2d507718b618804b6..bccffbc12e97d397a2ae0dbc1e9e0c792dfbcac2 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -213,6 +213,11 @@ static void rds_ib_dev_free_dev(struct rds_ib_device *rds_ibdev)
                rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool);
        if (rds_ibdev->mr_1m_pool)
                rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
+       if (rds_ibdev->use_fastreg) {
+               cancel_work_sync(&rds_ibdev->fastreg_reset_w);
+               down_write(&rds_ibdev->fastreg_lock);
+               rds_ib_destroy_fastreg(rds_ibdev);
+       }
        if (rds_ibdev->mr)
                ib_dereg_mr(rds_ibdev->mr);
        if (rds_ibdev->pd)
@@ -287,6 +292,9 @@ void rds_ib_remove_one(struct ib_device *device)
        struct rds_ib_device *rds_ibdev;
        int i;
 
+       rds_rtd(RDS_RTD_RDMA_IB, "Removing ib_device: %p name: %s num_ports: %u\n",
+               device, device->name, device->phys_port_cnt);
+
        rds_ibdev = ib_get_client_data(device, &rds_ib_client);
        if (!rds_ibdev) {
                rds_rtd(RDS_RTD_ACT_BND, "rds_ibdev is NULL, ib_device %p\n",
@@ -1990,6 +1998,9 @@ void rds_ib_add_one(struct ib_device *device)
        struct ib_device_attr *dev_attr;
        bool has_frwr, has_fmr;
 
+       rds_rtd(RDS_RTD_RDMA_IB, "Adding ib_device: %p name: %s num_ports: %u\n",
+               device, device->name, device->phys_port_cnt);
+
        /* Only handle IB (no iWARP) devices */
        if (device->node_type != RDMA_NODE_IB_CA)
                return;
@@ -2072,6 +2083,16 @@ void rds_ib_add_one(struct ib_device *device)
        pr_info("RDS/IB: %s will be used for ib_device: %s\n",
                rds_ibdev->use_fastreg ? "FRWR" : "FMR", device->name);
 
+       if (rds_ibdev->use_fastreg) {
+               INIT_WORK(&rds_ibdev->fastreg_reset_w, rds_ib_reset_fastreg);
+               init_rwsem(&rds_ibdev->fastreg_lock);
+               atomic_set(&rds_ibdev->fastreg_wrs, RDS_IB_DEFAULT_FREG_WR);
+               if (rds_ib_setup_fastreg(rds_ibdev)) {
+                       pr_err("RDS/IB: Failed to setup fastreg resources\n");
+                       goto put_dev;
+               }
+       }
+
        rds_ibdev->mr_1m_pool =
                rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
        if (IS_ERR(rds_ibdev->mr_1m_pool)) {
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 7f0bd52dddfb853cb67d036e6305c1a45ff56783..0c765ca6000d3a98e46611d4a3cab3e7bd202707 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -52,6 +52,8 @@
 
 #define        RDS_IB_CLEAN_CACHE      1
 
+#define RDS_IB_DEFAULT_FREG_PORT_NUM   1
+
 extern struct rw_semaphore rds_ib_devices_lock;
 extern struct list_head rds_ib_devices;
 
@@ -418,7 +420,17 @@ struct rds_ib_device {
        struct list_head        conn_list;
        struct ib_device        *dev;
        struct ib_pd            *pd;
+
        bool                    use_fastreg;
+       int                     fastreg_cq_vector;
+       struct ib_cq            *fastreg_cq;
+       struct ib_wc            fastreg_wc[RDS_WC_MAX];
+       struct ib_qp            *fastreg_qp;
+       struct tasklet_struct   fastreg_tasklet;
+       atomic_t                fastreg_wrs;
+       struct rw_semaphore     fastreg_lock;
+       struct work_struct      fastreg_reset_w;
+
        struct ib_mr            *mr;
        struct rds_ib_mr_pool   *mr_1m_pool;
        struct rds_ib_mr_pool   *mr_8k_pool;
@@ -582,6 +594,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn,
                                struct rdma_cm_event *event);
 void rds_ib_init_frag(unsigned int version);
 void rds_ib_conn_destroy_init(struct rds_connection *conn);
+void rds_ib_destroy_fastreg(struct rds_ib_device *rds_ibdev);
+int rds_ib_setup_fastreg(struct rds_ib_device *rds_ibdev);
+void rds_ib_reset_fastreg(struct work_struct *work);
 
 /* ib_rdma.c */
 int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
@@ -599,6 +614,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
 int rds_ib_fmr_init(void);
 void rds_ib_fmr_exit(void);
+void rds_ib_fcq_handler(struct rds_ib_device *rds_ibdev, struct ib_wc *wc);
 void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
 
 /* ib_recv.c */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index d433836d023101c8a49487ce50ca7ae0d61314f6..f2d409496073776fdf3c7fe9b9c6a38cb4d61327 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -398,6 +398,13 @@ static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
                 event->event, rds_ib_event_str(event->event), data);
 }
 
+static void rds_ib_cq_comp_handler_fastreg(struct ib_cq *cq, void *context)
+{
+       struct rds_ib_device *rds_ibdev = context;
+
+       tasklet_schedule(&rds_ibdev->fastreg_tasklet);
+}
+
 static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
 {
        struct rds_connection *conn = context;
@@ -422,6 +429,20 @@ static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
        tasklet_schedule(&ic->i_rtasklet);
 }
 
+static void poll_fcq(struct rds_ib_device *rds_ibdev, struct ib_cq *cq,
+                    struct ib_wc *wcs)
+{
+       int nr, i;
+       struct ib_wc *wc;
+
+       while ((nr = ib_poll_cq(cq, RDS_WC_MAX, wcs)) > 0) {
+               for (i = 0; i < nr; i++) {
+                       wc = wcs + i;
+                       rds_ib_fcq_handler(rds_ibdev, wc);
+               }
+       }
+}
+
 static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
                     struct ib_wc *wcs)
 {
@@ -473,6 +494,15 @@ static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
        }
 }
 
+static void rds_ib_tasklet_fn_fastreg(unsigned long data)
+{
+       struct rds_ib_device *rds_ibdev = (struct rds_ib_device *)data;
+
+       poll_fcq(rds_ibdev, rds_ibdev->fastreg_cq, rds_ibdev->fastreg_wc);
+       ib_req_notify_cq(rds_ibdev->fastreg_cq, IB_CQ_NEXT_COMP);
+       poll_fcq(rds_ibdev, rds_ibdev->fastreg_cq, rds_ibdev->fastreg_wc);
+}
+
 void rds_ib_tasklet_fn_send(unsigned long data)
 {
        struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
@@ -636,7 +666,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        struct ib_qp_init_attr attr;
        struct rds_ib_device *rds_ibdev;
        int ret;
-       int mr_reg, mr_inv;
+       int mr_reg;
 
        /*
         * It's normal to see a null device if an incoming connection races
@@ -646,25 +676,22 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        if (!rds_ibdev)
                return -EOPNOTSUPP;
 
-       /* In the case of FRWR, mr registration and invalidation wrs use the
+       /* In the case of FRWR, mr registration wrs use the
         * same work queue as the send wrs. To make sure that we are not
         * overflowing the workqueue, we allocate separately for each operation.
-        * mr_reg and mr_inv are the wr numbers allocated for reg and inv.
+        * mr_reg is the number of wrs allocated for reg.
         */
-       if (rds_ibdev->use_fastreg) {
+       if (rds_ibdev->use_fastreg)
                mr_reg = RDS_IB_DEFAULT_FREG_WR;
-               mr_inv = 1;
-       } else {
+       else
                mr_reg = 0;
-               mr_inv = 0;
-       }
 
        /* add the conn now so that connection establishment has the dev */
        rds_ib_add_conn(rds_ibdev, conn);
 
-       if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1 + mr_reg + mr_inv)
+       if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1 + mr_reg)
                rds_ib_ring_resize(&ic->i_send_ring,
-                                  rds_ibdev->max_wrs - 1 - mr_reg - mr_inv);
+                                  rds_ibdev->max_wrs - 1 - mr_reg);
        if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);
 
@@ -675,7 +702,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
        ic->i_scq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
                                rds_ib_cq_event_handler, conn,
-                               ic->i_send_ring.w_nr + 1 + mr_reg + mr_inv,
+                               ic->i_send_ring.w_nr + 1 + mr_reg,
                                ic->i_scq_vector);
        if (IS_ERR(ic->i_scq)) {
                ret = PTR_ERR(ic->i_scq);
@@ -721,7 +748,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        attr.event_handler = rds_ib_qp_event_handler;
        attr.qp_context = conn;
        /* + 1 to allow for the single ack message */
-       attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1 + mr_reg + mr_inv;
+       attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1 + mr_reg;
        attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
        attr.cap.max_send_sge = rds_ibdev->max_sge;
        attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
@@ -1386,3 +1413,214 @@ void rds_ib_conn_free(void *arg)
 
        kfree(ic);
 }
+
+void rds_ib_destroy_fastreg(struct rds_ib_device *rds_ibdev)
+{
+       /* Because we hold the fastreg_lock for write, by this point we
+        * should have received completions for all the wrs posted.
+        */
+       WARN_ON(atomic_read(&rds_ibdev->fastreg_wrs) != RDS_IB_DEFAULT_FREG_WR);
+
+       tasklet_kill(&rds_ibdev->fastreg_tasklet);
+       if (rds_ibdev->fastreg_qp) {
+               /* Destroy qp */
+               if (ib_destroy_qp(rds_ibdev->fastreg_qp))
+                       pr_err("Error destroying fastreg qp for rds_ibdev: %p\n",
+                              rds_ibdev);
+               rds_ibdev->fastreg_qp = NULL;
+       }
+
+       if (rds_ibdev->fastreg_cq) {
+               /* Destroy cq and cq_vector */
+               if (ib_destroy_cq(rds_ibdev->fastreg_cq))
+                       pr_err("Error destroying fastreg cq for rds_ibdev: %p\n",
+                              rds_ibdev);
+               rds_ibdev->fastreg_cq = NULL;
+               ibdev_put_vector(rds_ibdev, rds_ibdev->fastreg_cq_vector);
+       }
+}
+
+int rds_ib_setup_fastreg(struct rds_ib_device *rds_ibdev)
+{
+       int ret = 0;
+       struct ib_qp_init_attr qp_init_attr;
+       struct ib_qp_attr qp_attr;
+       struct ib_port_attr port_attr;
+       int gid_index = 0;
+       union ib_gid dgid;
+
+       rds_ibdev->fastreg_cq_vector = ibdev_get_unused_vector(rds_ibdev);
+       rds_ibdev->fastreg_cq = ib_create_cq(rds_ibdev->dev,
+                                            rds_ib_cq_comp_handler_fastreg,
+                                            rds_ib_cq_event_handler,
+                                            rds_ibdev,
+                                            RDS_IB_DEFAULT_FREG_WR + 1,
+                                            rds_ibdev->fastreg_cq_vector);
+       if (IS_ERR(rds_ibdev->fastreg_cq)) {
+               ret = PTR_ERR(rds_ibdev->fastreg_cq);
+               rds_ibdev->fastreg_cq = NULL;
+               ibdev_put_vector(rds_ibdev, rds_ibdev->fastreg_cq_vector);
+               rds_rtd(RDS_RTD_ERR, "ib_create_cq failed: %d\n", ret);
+               goto clean_up;
+       }
+
+       ret = ib_req_notify_cq(rds_ibdev->fastreg_cq, IB_CQ_NEXT_COMP);
+       if (ret)
+               goto clean_up;
+       rds_rtd(RDS_RTD_RDMA_IB,
+               "Successfully created fast reg cq for ib_device: %p\n",
+               rds_ibdev->dev);
+
+       memset(&qp_init_attr, 0, sizeof(qp_init_attr));
+       qp_init_attr.send_cq            = rds_ibdev->fastreg_cq;
+       qp_init_attr.recv_cq            = rds_ibdev->fastreg_cq;
+       qp_init_attr.qp_type            = IB_QPT_RC;
+       /* 1 WR is used for invalidation */
+       qp_init_attr.cap.max_send_wr    = RDS_IB_DEFAULT_FREG_WR + 1;
+       qp_init_attr.cap.max_recv_wr    = 0;
+       qp_init_attr.cap.max_send_sge   = 0;
+       qp_init_attr.cap.max_recv_sge   = 0;
+
+       rds_ibdev->fastreg_qp = ib_create_qp(rds_ibdev->pd, &qp_init_attr);
+       if (IS_ERR(rds_ibdev->fastreg_qp)) {
+               ret = PTR_ERR(rds_ibdev->fastreg_qp);
+               rds_ibdev->fastreg_qp = NULL;
+               rds_rtd(RDS_RTD_ERR, "ib_create_qp failed: %d\n", ret);
+               goto clean_up;
+       }
+       rds_rtd(RDS_RTD_RDMA_IB,
+               "Successfully created fast reg qp for ib_device: %p\n",
+               rds_ibdev->dev);
+
+       /* Use modify_qp verb to change the state from RESET to INIT */
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.qp_state        = IB_QPS_INIT;
+       qp_attr.pkey_index      = 0;
+       qp_attr.qp_access_flags = IB_ACCESS_REMOTE_READ |
+                                 IB_ACCESS_REMOTE_WRITE;
+       qp_attr.port_num        = RDS_IB_DEFAULT_FREG_PORT_NUM;
+
+       ret = ib_modify_qp(rds_ibdev->fastreg_qp, &qp_attr, IB_QP_STATE |
+                                               IB_QP_PKEY_INDEX        |
+                                               IB_QP_ACCESS_FLAGS      |
+                                               IB_QP_PORT);
+       if (ret) {
+               rds_rtd(RDS_RTD_ERR, "ib_modify_qp to IB_QPS_INIT failed: %d\n",
+                       ret);
+               goto clean_up;
+       }
+       rds_rtd(RDS_RTD_RDMA_IB,
+               "Successfully moved qp to INIT state for ib_device: %p\n",
+               rds_ibdev->dev);
+
+       /* query port to get the lid */
+       ret = ib_query_port(rds_ibdev->dev, RDS_IB_DEFAULT_FREG_PORT_NUM,
+                           &port_attr);
+       if (ret) {
+               rds_rtd(RDS_RTD_ERR, "ib_query_port failed: %d\n", ret);
+               goto clean_up;
+       }
+       rds_rtd(RDS_RTD_RDMA_IB,
+               "Successfully queried the port and the port is in %d state\n",
+               port_attr.state);
+
+       ret = ib_query_gid(rds_ibdev->dev, RDS_IB_DEFAULT_FREG_PORT_NUM,
+                          gid_index, &dgid);
+       if (ret) {
+               rds_rtd(RDS_RTD_ERR, "ib_query_gid failed: %d\n", ret);
+               goto clean_up;
+       }
+       rds_rtd(RDS_RTD_RDMA_IB,
+               "Successfully queried the gid_index %d and the gid is " RDS_IB_GID_FMT "\n",
+               gid_index, RDS_IB_GID_ARG(dgid));
+
+       /* Use modify_qp verb to change the state from INIT to RTR */
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.qp_state        = IB_QPS_RTR;
+       qp_attr.path_mtu        = IB_MTU_4096;
+       qp_attr.dest_qp_num     = rds_ibdev->fastreg_qp->qp_num;
+       qp_attr.rq_psn          = 1;
+       qp_attr.ah_attr.ah_flags        = IB_AH_GRH;
+       qp_attr.ah_attr.dlid            = port_attr.lid;
+       qp_attr.ah_attr.sl              = 0;
+       qp_attr.ah_attr.src_path_bits   = 0;
+       qp_attr.ah_attr.port_num        = RDS_IB_DEFAULT_FREG_PORT_NUM;
+       qp_attr.ah_attr.grh.hop_limit   = 1;
+       qp_attr.ah_attr.grh.dgid        = dgid;
+       qp_attr.ah_attr.grh.sgid_index  = gid_index;
+
+       ret = ib_modify_qp(rds_ibdev->fastreg_qp, &qp_attr, IB_QP_STATE |
+                                               IB_QP_AV                |
+                                               IB_QP_PATH_MTU          |
+                                               IB_QP_DEST_QPN          |
+                                               IB_QP_RQ_PSN            |
+                                               IB_QP_MAX_DEST_RD_ATOMIC |
+                                               IB_QP_MIN_RNR_TIMER);
+       if (ret) {
+               rds_rtd(RDS_RTD_ERR, "ib_modify_qp to IB_QPS_RTR failed: %d\n",
+                       ret);
+               goto clean_up;
+       }
+       rds_rtd(RDS_RTD_RDMA_IB,
+               "Successfully moved qp to RTR state for ib_device: %p\n",
+               rds_ibdev->dev);
+
+       /* Use modify_qp verb to change the state from RTR to RTS */
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.qp_state        = IB_QPS_RTS;
+       qp_attr.sq_psn          = 1;
+       qp_attr.timeout         = 14;
+       qp_attr.retry_cnt       = 6;
+       qp_attr.rnr_retry       = 6;
+       qp_attr.max_rd_atomic   = 1;
+
+       ret = ib_modify_qp(rds_ibdev->fastreg_qp, &qp_attr, IB_QP_STATE |
+                                               IB_QP_TIMEOUT           |
+                                               IB_QP_RETRY_CNT         |
+                                               IB_QP_RNR_RETRY         |
+                                               IB_QP_SQ_PSN            |
+                                               IB_QP_MAX_QP_RD_ATOMIC);
+       if (ret) {
+               rds_rtd(RDS_RTD_ERR, "ib_modify_qp to IB_QPS_RTS failed: %d\n",
+                       ret);
+               goto clean_up;
+       }
+       rds_rtd(RDS_RTD_RDMA_IB,
+               "Successfully moved qp to RTS state for ib_device: %p\n",
+               rds_ibdev->dev);
+
+       tasklet_init(&rds_ibdev->fastreg_tasklet, rds_ib_tasklet_fn_fastreg,
+                    (unsigned long)rds_ibdev);
+       atomic_set(&rds_ibdev->fastreg_wrs, RDS_IB_DEFAULT_FREG_WR);
+
+clean_up:
+       if (ret)
+               rds_ib_destroy_fastreg(rds_ibdev);
+       return ret;
+}
+
+void rds_ib_reset_fastreg(struct work_struct *work)
+{
+       struct rds_ib_device *rds_ibdev = container_of(work,
+                                                      struct rds_ib_device,
+                                                      fastreg_reset_w);
+
+       pr_warn("RDS: IB: Resetting fastreg qp\n");
+       /* Acquire write lock to stop posting on fastreg qp before resetting */
+       down_write(&rds_ibdev->fastreg_lock);
+
+       rds_ib_destroy_fastreg(rds_ibdev);
+       if (rds_ib_setup_fastreg(rds_ibdev)) {
+               /* Failing to setup fastreg qp at this stage is unexpected.
+                * If it happens, throw a warning, and return immediately,
+                * without up_writing the fastreg_lock.
+                */
+               pr_err("RDS: IB: Failed to setup fastreg resources in %s\n",
+                      __func__);
+               WARN_ON(1);
+               return;
+       }
+
+       up_write(&rds_ibdev->fastreg_lock);
+       pr_warn("RDS: IB: Finished resetting fastreg qp\n");
+}
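
The net/rds/ib_rdma.c hunks that follow route the connection-less MR work
requests onto this proxy qp; in particular, rds_ib_fastreg_inv() now posts on
the device-wide fastreg_qp instead of a connection qp. From userspace, the
invalidation side is typically reached when the MR is released again. A
hypothetical sketch, continuing the RDS_GET_MR example above (names taken from
the RDS uapi as I understand it):

/* Hypothetical follow-up: release the MR obtained via RDS_GET_MR and ask
 * for it to be invalidated.  On an FRWR device the resulting invalidate
 * work request ends up on the proxy qp added by this patch.
 */
#include <stdint.h>
#include <sys/socket.h>
#include <linux/rds.h>

static int free_rds_mr(int fd, uint64_t cookie)
{
        struct rds_free_mr_args args = {
                .cookie = cookie,              /* cookie returned by RDS_GET_MR */
                .flags  = RDS_RDMA_INVALIDATE, /* invalidate, not just unmap */
        };

        return setsockopt(fd, SOL_RDS, RDS_FREE_MR, &args, sizeof(args));
}
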
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 88da05f8f56c8cfe4543002eec9d07d2145be84c..d9f0649bf249e460061783fd579a4bf7aa207cce 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -1031,13 +1031,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                goto out;
        }
 
-       if (conn) {
+       if (conn)
                ic = conn->c_transport_data;
-       } else if (rds_ibdev->use_fastreg) {
-               /* TODO: Add FRWR support for RDS_GET_MR */
-               ret = -EOPNOTSUPP;
-               goto out;
-       }
 
        if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
                ret = -ENODEV;
@@ -1146,13 +1141,25 @@ out_unmap:
        return ret;
 }
 
-static int rds_ib_rdma_build_fastreg(struct rds_ib_mr *ibmr)
+static int rds_ib_rdma_build_fastreg(struct rds_ib_device *rds_ibdev,
+                                    struct rds_ib_mr *ibmr)
 {
        struct ib_send_wr f_wr, *failed_wr;
+       struct ib_qp *qp;
+       atomic_t *n_wrs;
        int ret = 0;
 
-       while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
-               atomic_inc(&ibmr->ic->i_fastreg_wrs);
+       if (ibmr->ic) {
+               n_wrs = &ibmr->ic->i_fastreg_wrs;
+               qp = ibmr->ic->i_cm_id->qp;
+       } else {
+               down_read(&rds_ibdev->fastreg_lock);
+               n_wrs = &rds_ibdev->fastreg_wrs;
+               qp = rds_ibdev->fastreg_qp;
+       }
+
+       while (atomic_dec_return(n_wrs) <= 0) {
+               atomic_inc(n_wrs);
                /* Depending on how many times schedule() is called,
                 * we could replace it with wait_event() in future.
                 */
@@ -1178,10 +1185,10 @@ static int rds_ib_rdma_build_fastreg(struct rds_ib_mr *ibmr)
        f_wr.send_flags = IB_SEND_SIGNALED;
 
        failed_wr = &f_wr;
-       ret = ib_post_send(ibmr->ic->i_cm_id->qp, &f_wr, &failed_wr);
+       ret = ib_post_send(qp, &f_wr, &failed_wr);
        BUG_ON(failed_wr != &f_wr);
        if (ret) {
-               atomic_inc(&ibmr->ic->i_fastreg_wrs);
+               atomic_inc(n_wrs);
                ibmr->fr_state = MR_IS_INVALID;
                pr_warn_ratelimited("RDS/IB: %s:%d ib_post_send returned %d\n",
                                    __func__, __LINE__, ret);
@@ -1195,6 +1202,8 @@ static int rds_ib_rdma_build_fastreg(struct rds_ib_mr *ibmr)
        }
 
 out:
+       if (!ibmr->ic)
+               up_read(&rds_ibdev->fastreg_lock);
        return ret;
 }
 
@@ -1216,7 +1225,7 @@ static int rds_ib_map_fastreg_mr(struct rds_ib_device *rds_ibdev,
        if (ret)
                goto out;
 
-       ret = rds_ib_rdma_build_fastreg(ibmr);
+       ret = rds_ib_rdma_build_fastreg(rds_ibdev, ibmr);
        if (ret)
                goto out;
 
@@ -1243,6 +1252,8 @@ static int rds_ib_fastreg_inv(struct rds_ib_mr *ibmr)
        struct ib_send_wr s_wr, *failed_wr;
        int ret = 0;
 
+       down_read(&ibmr->device->fastreg_lock);
+
        if (ibmr->fr_state != MR_IS_VALID)
                goto out;
 
@@ -1255,7 +1266,7 @@ static int rds_ib_fastreg_inv(struct rds_ib_mr *ibmr)
        s_wr.send_flags = IB_SEND_SIGNALED;
 
        failed_wr = &s_wr;
-       ret = ib_post_send(ibmr->ic->i_cm_id->qp, &s_wr, &failed_wr);
+       ret = ib_post_send(ibmr->device->fastreg_qp, &s_wr, &failed_wr);
        BUG_ON(failed_wr != &s_wr);
        if (ret) {
                ibmr->fr_state = MR_IS_STALE;
@@ -1265,11 +1276,33 @@ static int rds_ib_fastreg_inv(struct rds_ib_mr *ibmr)
        }
 
        wait_for_completion(&ibmr->wr_comp);
-
 out:
+       up_read(&ibmr->device->fastreg_lock);
        return ret;
 }
 
+void rds_ib_fcq_handler(struct rds_ib_device *rds_ibdev, struct ib_wc *wc)
+{
+       struct rds_ib_mr *ibmr = (struct rds_ib_mr *)wc->wr_id;
+       enum rds_ib_fr_state fr_state = ibmr->fr_state;
+
+       WARN_ON(ibmr->fr_state == MR_IS_STALE);
+
+       if (wc->status != IB_WC_SUCCESS) {
+               pr_warn("RDS: IB: MR completion on fastreg qp status %u vendor_err %u\n",
+                       wc->status, wc->vendor_err);
+               ibmr->fr_state = MR_IS_STALE;
+               queue_work(rds_wq, &rds_ibdev->fastreg_reset_w);
+       }
+
+       if (fr_state == MR_IS_INVALID) {
+               complete(&ibmr->wr_comp);
+       } else if (fr_state == MR_IS_VALID) {
+               atomic_inc(&rds_ibdev->fastreg_wrs);
+               complete(&ibmr->wr_comp);
+       }
+}
+
 void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
 {
        struct rds_ib_mr *ibmr = (struct rds_ib_mr *)wc->wr_id;