 }
        sge->length = len;
 
-       ib_dma_sync_single_for_device(ia->ri_device, sge->addr,
+       ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
                                      sge->length, DMA_TO_DEVICE);
        req->rl_send_wr.num_sge++;
        return true;
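
This hunk and the next make the same change on the send path: the
ib_device used for the DMA sync now comes from the regbuf being synced
rather than from the rpcrdma_ia. A minimal sketch of the resulting
pattern, using the rdmab_device() accessor introduced later in this
patch; the wrapper name itself is hypothetical:

static void
example_sync_regbuf_for_device(struct rpcrdma_regbuf *rb, unsigned int len)
{
        /* Everything needed for the sync is carried by the mapped regbuf */
        ib_dma_sync_single_for_device(rdmab_device(rb), rdmab_addr(rb),
                                      len, DMA_TO_DEVICE);
}
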
        sge[sge_no].addr = rdmab_addr(rb);
        sge[sge_no].length = xdr->head[0].iov_len;
        sge[sge_no].lkey = rdmab_lkey(rb);
-       ib_dma_sync_single_for_device(device, sge[sge_no].addr,
+       ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
                                      sge[sge_no].length, DMA_TO_DEVICE);
 
        /* If there is a Read chunk, the page list is being handled
 
        rep->rr_wc_flags = wc->wc_flags;
        rep->rr_inv_rkey = wc->ex.invalidate_rkey;
 
-       ib_dma_sync_single_for_cpu(rep->rr_device,
+       ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                                   rdmab_addr(rep->rr_rdmabuf),
                                   rep->rr_len, DMA_FROM_DEVICE);
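
Same idea on the receive side: the completion handler syncs the reply
buffer for the CPU using the device cached in rr_rdmabuf, which is what
allows the rr_device field to be dropped from struct rpcrdma_rep in the
final hunk. A mirror-image sketch (hypothetical wrapper name):

static void
example_sync_regbuf_for_cpu(struct rpcrdma_regbuf *rb, unsigned int len)
{
        /* CPU-side sync once the reply has arrived */
        ib_dma_sync_single_for_cpu(rdmab_device(rb), rdmab_addr(rb),
                                   len, DMA_FROM_DEVICE);
}
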
 
 rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
 {
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
-       struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_rep *rep;
        int rc;
 
                goto out_free;
        }
 
-       rep->rr_device = ia->ri_device;
        rep->rr_cqe.done = rpcrdma_wc_receive;
        rep->rr_rxprt = r_xprt;
        INIT_WORK(&rep->rr_work, rpcrdma_reply_handler);
 bool
 __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
 {
+       struct ib_device *device = ia->ri_device;
+
        if (rb->rg_direction == DMA_NONE)
                return false;
 
-       rb->rg_iov.addr = ib_dma_map_single(ia->ri_device,
+       rb->rg_iov.addr = ib_dma_map_single(device,
                                            (void *)rb->rg_base,
                                            rdmab_length(rb),
                                            rb->rg_direction);
-       if (ib_dma_mapping_error(ia->ri_device, rdmab_addr(rb)))
+       if (ib_dma_mapping_error(device, rdmab_addr(rb)))
                return false;
 
-       rb->rg_device = ia->ri_device;
+       rb->rg_device = device;
        rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
        return true;
 }
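
Note that __rpcrdma_dma_map_regbuf() records the device in rb->rg_device,
which is exactly what rdmab_device() returns; the rpcrdma_ia is still
needed here only for the PD's local_dma_lkey. The unmap side can then
rely on the same cached pointer. A sketch of that counterpart, assuming
only the regbuf fields visible in this hunk (the function name is
illustrative):

static void
example_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
        if (!rb->rg_device)             /* never mapped, or already unmapped */
                return;
        ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
                            rdmab_length(rb), rb->rg_direction);
        rb->rg_device = NULL;
}
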
 
        return (struct rpcrdma_msg *)rb->rg_base;
 }
 
+static inline struct ib_device *
+rdmab_device(struct rpcrdma_regbuf *rb)
+{
+       return rb->rg_device;
+}
+
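
rdmab_device() simply hands back the pointer cached at map time, so it is
only meaningful after __rpcrdma_dma_map_regbuf() has succeeded. Assuming
the allocation path leaves rg_device NULL until the first successful map,
a caller that cannot guarantee that ordering could check first
(hypothetical helper, shown for illustration):

static inline bool
example_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
{
        return rdmab_device(rb) != NULL;
}
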
 #define RPCRDMA_DEF_GFP                (GFP_NOIO | __GFP_NOWARN)
 
 /* To ensure a transport can always make forward progress,
        unsigned int            rr_len;
        int                     rr_wc_flags;
        u32                     rr_inv_rkey;
-       struct ib_device        *rr_device;
        struct rpcrdma_xprt     *rr_rxprt;
        struct work_struct      rr_work;
        struct list_head        rr_list;