/* Maximum scatter/gather per FMR */
 #define RPCRDMA_MAX_FMR_SGES   (64)
 
+static int
+fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
+           struct rpcrdma_create_data_internal *cdata)
+{
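+       /* FMR maps and unmaps via synchronous verb calls rather than
+        * Send Queue WRs, so no send queue sizing is needed here.
+        */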
+       return 0;
+}
+
 /* FMR mode conveys up to 64 pages of payload per chunk segment.
  */
 static size_t
 const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
        .ro_map                         = fmr_op_map,
        .ro_unmap                       = fmr_op_unmap,
+       .ro_open                        = fmr_op_open,
        .ro_maxpages                    = fmr_op_maxpages,
        .ro_init                        = fmr_op_init,
        .ro_reset                       = fmr_op_reset,
 
        ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
 }
 
+static int
+frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
+            struct rpcrdma_create_data_internal *cdata)
+{
+       struct ib_device_attr *devattr = &ia->ri_devattr;
+       int depth, delta;
+
+       ia->ri_max_frmr_depth =
+                       min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+                             devattr->max_fast_reg_page_list_len);
+       dprintk("RPC:       %s: device's max FR page list len = %u\n",
+               __func__, ia->ri_max_frmr_depth);
+
+       /* Add room for FRMR registration and invalidation WRs.
+        * 1. FRMR reg WR for head
+        * 2. FRMR invalidate WR for head
+        * 3. N FRMR reg WRs for pagelist
+        * 4. N FRMR invalidate WRs for pagelist
+        * 5. FRMR reg WR for tail
+        * 6. FRMR invalidate WR for tail
+        * 7. The RDMA_SEND WR
+        */
+       depth = 7;
+
+       /* Calculate N if the device max FRMR depth is smaller than
+        * RPCRDMA_MAX_DATA_SEGS.
+        */
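+       /* Worked example (assuming RPCRDMA_MAX_DATA_SEGS is 64): a
+        * device limit of 20 gives delta = 44, so the loop below runs
+        * three times (delta 44 -> 24 -> 4 -> -16) and depth becomes
+        * 7 + 3 * 2 = 13.
+        */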
+       if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
+               delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
+               do {
+                       depth += 2; /* FRMR reg + invalidate */
+                       delta -= ia->ri_max_frmr_depth;
+               } while (delta > 0);
+       }
+
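+       /* Each RPC request can consume up to "depth" send WRs, so
+        * scale the send queue accordingly and clamp it to what the
+        * device supports. For example (hypothetical values), with
+        * max_qp_wr of 16384 and depth 13, max_requests is capped at
+        * 16384 / 13 = 1260.
+        */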
+       ep->rep_attr.cap.max_send_wr *= depth;
+       if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
+               cdata->max_requests = devattr->max_qp_wr / depth;
+               if (!cdata->max_requests)
+                       return -EINVAL;
+               ep->rep_attr.cap.max_send_wr = cdata->max_requests *
+                                              depth;
+       }
+
+       return 0;
+}
+
 /* FRWR mode conveys a list of pages per chunk segment. The
  * maximum length of that list is the FRWR page list depth.
  */
 const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
        .ro_map                         = frwr_op_map,
        .ro_unmap                       = frwr_op_unmap,
+       .ro_open                        = frwr_op_open,
        .ro_maxpages                    = frwr_op_maxpages,
        .ro_init                        = frwr_op_init,
        .ro_reset                       = frwr_op_reset,
 
 # define RPCDBG_FACILITY       RPCDBG_TRANS
 #endif
 
+static int
+physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
+                struct rpcrdma_create_data_internal *cdata)
+{
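+       /* PHYSICAL registration posts no registration or invalidation
+        * WRs on the Send Queue, so no sizing adjustment is needed.
+        */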
+       return 0;
+}
+
 /* PHYSICAL memory registration conveys one page per chunk segment.
  */
 static size_t
 const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
        .ro_map                         = physical_op_map,
        .ro_unmap                       = physical_op_unmap,
+       .ro_open                        = physical_op_open,
        .ro_maxpages                    = physical_op_maxpages,
        .ro_init                        = physical_op_init,
        .ro_reset                       = physical_op_reset,
 
                        dprintk("RPC:       %s: FRMR registration "
                                "not supported by HCA\n", __func__);
                        memreg = RPCRDMA_MTHCAFMR;
-               } else {
-                       /* Mind the ia limit on FRMR page list depth */
-                       ia->ri_max_frmr_depth = min_t(unsigned int,
-                               RPCRDMA_MAX_DATA_SEGS,
-                               devattr->max_fast_reg_page_list_len);
                }
        }
        if (memreg == RPCRDMA_MTHCAFMR) {
 
        ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
        ep->rep_attr.qp_context = ep;
-       /* send_cq and recv_cq initialized below */
        ep->rep_attr.srq = NULL;
        ep->rep_attr.cap.max_send_wr = cdata->max_requests;
-       switch (ia->ri_memreg_strategy) {
-       case RPCRDMA_FRMR: {
-               int depth = 7;
-
-               /* Add room for frmr register and invalidate WRs.
-                * 1. FRMR reg WR for head
-                * 2. FRMR invalidate WR for head
-                * 3. N FRMR reg WRs for pagelist
-                * 4. N FRMR invalidate WRs for pagelist
-                * 5. FRMR reg WR for tail
-                * 6. FRMR invalidate WR for tail
-                * 7. The RDMA_SEND WR
-                */
-
-               /* Calculate N if the device max FRMR depth is smaller than
-                * RPCRDMA_MAX_DATA_SEGS.
-                */
-               if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
-                       int delta = RPCRDMA_MAX_DATA_SEGS -
-                                   ia->ri_max_frmr_depth;
-
-                       do {
-                               depth += 2; /* FRMR reg + invalidate */
-                               delta -= ia->ri_max_frmr_depth;
-                       } while (delta > 0);
-
-               }
-               ep->rep_attr.cap.max_send_wr *= depth;
-               if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
-                       cdata->max_requests = devattr->max_qp_wr / depth;
-                       if (!cdata->max_requests)
-                               return -EINVAL;
-                       ep->rep_attr.cap.max_send_wr = cdata->max_requests *
-                                                      depth;
-               }
-               break;
-       }
-       default:
-               break;
-       }
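+       /* Let the registration strategy size the send queue; FRWR
+        * reserves extra room for its registration and invalidation
+        * WRs (see frwr_op_open).
+        */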
+       rc = ia->ri_ops->ro_open(ia, ep, cdata);
+       if (rc)
+               return rc;
        ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
        ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
        ep->rep_attr.cap.max_recv_sge = 1;
 
                                  struct rpcrdma_mr_seg *, int, bool);
        int             (*ro_unmap)(struct rpcrdma_xprt *,
                                    struct rpcrdma_mr_seg *);
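+       /* Per-strategy hook called before the QP is created; may
+        * adjust ep->rep_attr and connection parameters in cdata.
+        */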
+       int             (*ro_open)(struct rpcrdma_ia *,
+                                  struct rpcrdma_ep *,
+                                  struct rpcrdma_create_data_internal *);
        size_t          (*ro_maxpages)(struct rpcrdma_xprt *);
        int             (*ro_init)(struct rpcrdma_xprt *);
        void            (*ro_reset)(struct rpcrdma_xprt *);