__func__, (int)rpclen, rqst->rq_svec[0].iov_base);
 #endif
 
+       if (!rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, req->rl_rdmabuf))
+               goto out_map;
        req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
        req->rl_send_iov[0].length = RPCRDMA_HDRLEN_MIN;
        req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
 
+       if (!rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, req->rl_sendbuf))
+               goto out_map;
        req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
        req->rl_send_iov[1].length = rpclen;
        req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 
        req->rl_niovs = 2;
        return 0;
+
+out_map:
+       pr_err("rpcrdma: failed to DMA map a Send buffer\n");
+       return -EIO;
 }
 
 /**
 
                transfertypes[rtype], transfertypes[wtype],
                hdrlen, rpclen);
 
+       if (!rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, req->rl_rdmabuf))
+               goto out_map;
        req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
        req->rl_send_iov[0].length = hdrlen;
        req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
        if (rtype == rpcrdma_areadch)
                return 0;
 
+       if (!rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, req->rl_sendbuf))
+               goto out_map;
        req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
        req->rl_send_iov[1].length = rpclen;
        req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 out_unmap:
        r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
        return PTR_ERR(iptr);
+
+out_map:
+       pr_err("rpcrdma: failed to DMA map a Send buffer\n");
+       iptr = ERR_PTR(-EIO);
+       goto out_unmap;
 }
 
 /*
 
  * @direction: direction of data movement
  * @flags: GFP flags
  *
- * Returns an ERR_PTR, or a pointer to a regbuf, which is a
- * contiguous memory region that is DMA mapped persistently, and
- * is registered for local I/O.
+ * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
+ * can be persistently DMA-mapped for I/O.
  *
  * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
  * receiving the payload of RDMA RECV operations. During Long Calls
                     enum dma_data_direction direction, gfp_t flags)
 {
        struct rpcrdma_regbuf *rb;
-       struct ib_sge *iov;
 
        rb = kmalloc(sizeof(*rb) + size, flags);
        if (rb == NULL)
-               goto out;
+               return ERR_PTR(-ENOMEM);
 
+       rb->rg_device = NULL;
        rb->rg_direction = direction;
-       iov = &rb->rg_iov;
-       iov->length = size;
-       iov->lkey = ia->ri_pd->local_dma_lkey;
-
-       if (direction != DMA_NONE) {
-               iov->addr = ib_dma_map_single(ia->ri_device,
-                                             (void *)rb->rg_base,
-                                             rdmab_length(rb),
-                                             rb->rg_direction);
-               if (ib_dma_mapping_error(ia->ri_device, iov->addr))
-                       goto out_free;
-       }
+       rb->rg_iov.length = size;
 
        return rb;
+}
 
-out_free:
-       kfree(rb);
-out:
-       return ERR_PTR(-ENOMEM);
+/**
+ * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
+ * @ia: controlling rpcrdma_ia
+ * @rb: regbuf to be mapped
+ */
+bool
+__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
+{
+       if (rb->rg_direction == DMA_NONE)
+               return false;
+
+       rb->rg_iov.addr = ib_dma_map_single(ia->ri_device,
+                                           (void *)rb->rg_base,
+                                           rdmab_length(rb),
+                                           rb->rg_direction);
+       if (ib_dma_mapping_error(ia->ri_device, rdmab_addr(rb)))
+               return false;
+
+       rb->rg_device = ia->ri_device;
+       rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
+       return true;
+}
+
+static void
+rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
+{
+       if (!rpcrdma_regbuf_is_mapped(rb))
+               return;
+
+       ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
+                           rdmab_length(rb), rb->rg_direction);
+       rb->rg_device = NULL;
 }
 
 /**
        if (!rb)
                return;
 
-       if (rb->rg_direction != DMA_NONE) {
-               ib_dma_unmap_single(ia->ri_device, rdmab_addr(rb),
-                                   rdmab_length(rb), rb->rg_direction);
-       }
-
+       rpcrdma_dma_unmap_regbuf(rb);
        kfree(rb);
 }
 
        recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        recv_wr.num_sge = 1;
 
+       if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
+               goto out_map;
        rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
        if (rc)
                goto out_postrecv;
        return 0;
 
+out_map:
+       pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
+       return -EIO;
+
 out_postrecv:
        pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
        return -ENOTCONN;
 
 
 struct rpcrdma_regbuf {
        struct ib_sge           rg_iov;
+       struct ib_device        *rg_device;
        enum dma_data_direction rg_direction;
        __be32                  rg_base[0] __attribute__ ((aligned(256)));
 };
 struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
                                            size_t, enum dma_data_direction,
                                            gfp_t);
+bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
 void rpcrdma_free_regbuf(struct rpcrdma_ia *,
                         struct rpcrdma_regbuf *);
 
+static inline bool
+rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
+{
+       return rb->rg_device != NULL;
+}
+
+static inline bool
+rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
+{
+       if (likely(rpcrdma_regbuf_is_mapped(rb)))
+               return true;
+       return __rpcrdma_dma_map_regbuf(ia, rb);
+}
+
 int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);
 
 int rpcrdma_alloc_wq(void);