#define ISER_MAX_CQ 4
 
+struct iser_conn;
+struct iscsi_iser_task;
+
 struct iser_device {
        struct ib_device             *ib_device;
        struct ib_pd                 *pd;
        int                          cq_active_qps[ISER_MAX_CQ];
        int                          cqs_used;
        struct iser_cq_desc          *cq_desc;
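+       /* RDMA registration hooks, assigned when the device resources
+        * are created (currently wired to the FMR pool helpers) */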
+       int                          (*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn,
+                                                               unsigned cmds_max);
+       void                         (*iser_free_rdma_reg_res)(struct iser_conn *ib_conn);
+       int                          (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
+                                                         enum iser_data_dir cmd_dir);
+       void                         (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
+                                                           enum iser_data_dir cmd_dir);
 };
 
 struct iser_conn {
                       struct iser_page_vec *page_vec,
                       struct iser_mem_reg  *mem_reg);
 
-void iser_unreg_mem(struct iser_mem_reg *mem_reg);
+void iser_unreg_mem(struct iscsi_iser_task *iser_task,
+                   enum iser_data_dir cmd_dir);
 
 int  iser_post_recvl(struct iser_conn *ib_conn);
 int  iser_post_recvm(struct iser_conn *ib_conn, int count);
 
 
 {
        struct iscsi_iser_task *iser_task = task->dd_data;
+       struct iser_device  *device = iser_task->iser_conn->ib_conn->device;
        struct iser_regd_buf *regd_buf;
        int err;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
                return -EINVAL;
        }
 
-       err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
+       err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                return err;
                       unsigned int edtl)
 {
        struct iscsi_iser_task *iser_task = task->dd_data;
+       struct iser_device  *device = iser_task->iser_conn->ib_conn->device;
        struct iser_regd_buf *regd_buf;
        int err;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
                return -EINVAL;
        }
 
-       err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
+       err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
        if (err != 0) {
                iser_err("Failed to register write cmd RDMA mem\n");
                return err;
        ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
        ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;
 
-       if (iser_create_fmr_pool(ib_conn, session->scsi_cmds_max))
-               goto create_fmr_pool_failed;
+       if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
+               goto create_rdma_reg_res_failed;
 
        if (iser_alloc_login_buf(ib_conn))
                goto alloc_login_buf_fail;
 rx_desc_alloc_fail:
        iser_free_login_buf(ib_conn);
 alloc_login_buf_fail:
-       iser_free_fmr_pool(ib_conn);
-create_fmr_pool_failed:
+       device->iser_free_rdma_reg_res(ib_conn);
+create_rdma_reg_res_failed:
        iser_err("failed allocating rx descriptors / data buffers\n");
        return -ENOMEM;
 }
        if (!ib_conn->rx_descs)
                goto free_login_buf;
 
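+       /* release RDMA registration resources before unmapping the rx ring */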
+       if (device && device->iser_free_rdma_reg_res)
+               device->iser_free_rdma_reg_res(ib_conn);
+
        rx_desc = ib_conn->rx_descs;
        for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
 
 free_login_buf:
        iser_free_login_buf(ib_conn);
-       iser_free_fmr_pool(ib_conn);
 }
 
 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 
 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
+       struct iser_device *device = iser_task->iser_conn->ib_conn->device;
        int is_rdma_aligned = 1;
-       struct iser_regd_buf *regd;
 
        /* if we were reading, copy back to unaligned sglist,
         * anyway dma_unmap and free the copy
                iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
        }
 
-       if (iser_task->dir[ISER_DIR_IN]) {
-               regd = &iser_task->rdma_regd[ISER_DIR_IN];
-               if (regd->reg.is_fmr)
-                       iser_unreg_mem(&regd->reg);
-       }
+       if (iser_task->dir[ISER_DIR_IN])
+               device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
 
-       if (iser_task->dir[ISER_DIR_OUT]) {
-               regd = &iser_task->rdma_regd[ISER_DIR_OUT];
-               if (regd->reg.is_fmr)
-                       iser_unreg_mem(&regd->reg);
-       }
+       if (iser_task->dir[ISER_DIR_OUT])
+               device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
 
        /* if the data was unaligned, it was already unmapped and then copied */
        if (is_rdma_aligned)
 
        int i, j;
        struct iser_cq_desc *cq_desc;
 
+       /* Assign the RDMA registration function handles (FMR pool based) */
+       device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
+       device->iser_free_rdma_reg_res = iser_free_fmr_pool;
+       device->iser_reg_rdma_mem = iser_reg_rdma_mem;
+       device->iser_unreg_rdma_mem = iser_unreg_mem;
+
        device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
        iser_info("using %d CQs, device %s supports %d vectors\n",
                  device->cqs_used, device->ib_device->name,
 /**
 * Unregister (previously registered) memory.
  */
-void iser_unreg_mem(struct iser_mem_reg *reg)
+void iser_unreg_mem(struct iscsi_iser_task *iser_task,
+                   enum iser_data_dir cmd_dir)
 {
+       struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
        int ret;
 
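+       /* nothing to undo if the buffer was not mapped via the FMR pool */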
+       if (!reg->is_fmr)
+               return;
+
        iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
 
        ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);