www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
svcrdma: Add svc_rdma_get_context() API that is allowed to fail
author: Chuck Lever <chuck.lever@oracle.com>
Wed, 26 Aug 2015 20:27:00 +0000 (14:27 -0600)
committer: Chuck Lever <chuck.lever@oracle.com>
Mon, 31 Aug 2015 20:46:03 +0000 (14:46 -0600)
[ Proposed for v4.4 ]

To support backward direction calls, I'm going to add an
svc_rdma_get_context() call in the client RDMA transport.

Called from ->buf_alloc(), we can't sleep waiting for memory.
So add an API that can get a server op_ctxt but won't sleep.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma_transport.c

index a91f760254d9e27ff6b436dd4b0c404db0b72bf8..ae582837752288723fb61b1f079a979ce1c3cb34 100644 (file)
@@ -223,6 +223,8 @@ extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
 extern int svc_rdma_post_recv(struct svcxprt_rdma *);
 extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
 extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
+/* Like svc_rdma_get_context(), but allocates with caller-supplied gfp
+ * flags and can fail: returns NULL when the allocation fails. */
+extern struct svc_rdma_op_ctxt *svc_rdma_get_context_gfp(struct svcxprt_rdma *,
+                                                        gfp_t);
 extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
 extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
 extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
index e35cf9c5fe81305c922e05f1c0b874ca44a27ef7..bc8b46e49638df3252b1df6e3c73b70a8ae8c6af 100644 (file)
@@ -153,17 +153,35 @@ static void svc_rdma_bc_free(struct svc_xprt *xprt)
 }
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
 
-struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+/* Initialize a freshly allocated op_ctxt and account for it against
+ * the transport (sc_ctxt_used). Shared by both allocation paths below;
+ * the caller is responsible for having allocated @ctxt. */
+static void svc_rdma_init_context(struct svcxprt_rdma *xprt,
+                                 struct svc_rdma_op_ctxt *ctxt)
 {
-       struct svc_rdma_op_ctxt *ctxt;
-
-       ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
-                               GFP_KERNEL | __GFP_NOFAIL);
        ctxt->xprt = xprt;
        INIT_LIST_HEAD(&ctxt->dto_q);
        ctxt->count = 0;
        ctxt->frmr = NULL;
        atomic_inc(&xprt->sc_ctxt_used);
+}
+
+/* Allocate and initialize an op_ctxt using caller-supplied gfp @flags,
+ * so non-sleeping contexts (e.g. ->buf_alloc) can use GFP_NOWAIT.
+ * Returns NULL if kmem_cache_alloc() fails. */
+struct svc_rdma_op_ctxt *svc_rdma_get_context_gfp(struct svcxprt_rdma *xprt,
+                                                 gfp_t flags)
+{
+       struct svc_rdma_op_ctxt *ctxt;
+
+       ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, flags);
+       if (!ctxt)
+               return NULL;
+       svc_rdma_init_context(xprt, ctxt);
+       return ctxt;
+}
+
+/* Allocate and initialize an op_ctxt. Allocates with
+ * GFP_KERNEL | __GFP_NOFAIL, so this may sleep but never returns NULL
+ * (hence no NULL check before svc_rdma_init_context()). */
+struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+{
+       struct svc_rdma_op_ctxt *ctxt;
+
+       ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
+                               GFP_KERNEL | __GFP_NOFAIL);
+       svc_rdma_init_context(xprt, ctxt);
        return ctxt;
 }