From 859b604e7da69f65edc9dce711d32cba3e33ce02 Mon Sep 17 00:00:00 2001
From: Jim Mott
Date: Tue, 23 Oct 2007 10:57:33 -0700
Subject: [PATCH] SDP bug647 - Validate ChRcvBuf range and add comments

Clean up the buffer resize code to comply with CA4-83: upon receipt of a
ChRcvBuf message, the remote peer shall not change the buffer size in the
direction opposite of that requested. Also add some comments and pretty up
the code.

Signed-off-by: Jim Mott
---
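Reviewer note: below is a minimal, self-contained sketch of the CA4-83 range
check that sdp_resize_buffers() applies in the diff that follows: a ChRcvBuf
size is honored only if it grows the current receive buffer and stays within
the SDP_MAX_SEND_SKB_FRAGS ceiling. The check_resize() helper and the SKETCH_*
constants are illustrative stand-ins, not driver code; the real function also
consults the large-socket accounting via sdp_get_large_socket().

#include <stdint.h>

#define SKETCH_PAGE_SIZE	4096u
#define SKETCH_HEAD_SIZE	256u	/* stand-in for SDP_HEAD_SIZE */
#define SKETCH_MAX_FRAGS	17u	/* stand-in for SDP_MAX_SEND_SKB_FRAGS */

/* Return the new fragment count, or -1 to refuse the resize request. */
int check_resize(uint32_t curr_frags, uint32_t new_size)
{
	uint32_t curr_size = SKETCH_HEAD_SIZE + curr_frags * SKETCH_PAGE_SIZE;
	uint32_t max_size = SKETCH_HEAD_SIZE + SKETCH_MAX_FRAGS * SKETCH_PAGE_SIZE;
	uint32_t frags;

	/* CA4-83: never resize in the direction opposite of the request,
	 * so a size that does not grow the current buffer is refused,
	 * as is anything beyond the per-socket maximum. */
	if (new_size <= curr_size || new_size > max_size)
		return -1;

	/* Round the payload up to whole pages, as PAGE_ALIGN() does. */
	frags = (new_size - SKETCH_HEAD_SIZE + SKETCH_PAGE_SIZE - 1) /
		SKETCH_PAGE_SIZE;
	if (frags > SKETCH_MAX_FRAGS)
		frags = SKETCH_MAX_FRAGS;
	return (int)frags;
}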
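Reviewer note: the htons()/ntohs() to htonl()/ntohl() switch in
sdp_post_sends() and in the completion handlers matters because the ChRcvBuf
size is handled as a 32-bit field throughout this patch, and a buffer built
from page-sized frags can exceed 64 KiB; the 16-bit helpers silently truncate
such a value. A standalone userspace illustration (the 17 * 4096 size is just
an example, not necessarily the driver's real maximum):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t size = 17 * 4096;	/* e.g. 17 page-sized recv frags */

	/* htons() takes a 16-bit value, so the high bits of a large
	 * buffer size are dropped before the byte swap even happens. */
	printf("htons(%u) = 0x%04x\n", size, (unsigned)htons(size));

	/* htonl() keeps the full 32-bit size intact on the wire. */
	printf("htonl(%u) = 0x%08x\n", size, (unsigned)htonl(size));
	return 0;
}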
 drivers/infiniband/ulp/sdp/sdp.h       | 20 ++++---
 drivers/infiniband/ulp/sdp/sdp_bcopy.c | 81 ++++++++++++++++----------
 drivers/infiniband/ulp/sdp/sdp_cma.c   |  1 +
 drivers/infiniband/ulp/sdp/sdp_main.c  |  3 +-
 4 files changed, 64 insertions(+), 41 deletions(-)

diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 5366df4d6591..d98ee3369581 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -147,13 +147,16 @@ struct sdp_sock {
 	struct ib_send_wr tx_wr;
 
 	/* SDP slow start */
-	int rcvbuf_scale;
-	int sent_request;
-	int sent_request_head;
-	int recv_request_head;
-	int recv_request;
-	int recv_frags;
-	int send_frags;
+	int rcvbuf_scale;	/* local recv buf scale for each socket */
+	int sent_request_head;	/* mark the tx_head of the last send resize
+				   request */
+	int sent_request;	/* 0 - not sent yet, 1 - request pending
+				   -1 - resize done successfully */
+	int recv_request_head;	/* mark the rx_head when the resize request
+				   was received */
+	int recv_request;	/* flag if request to resize was received */
+	int recv_frags;		/* max skb frags in recv packets */
+	int send_frags;		/* max skb frags in send packets */
 
 	struct ib_sge ibsge[SDP_MAX_SEND_SKB_FRAGS + 1];
 	struct ib_wc ibwc[SDP_NUM_WC];
@@ -226,7 +229,8 @@ struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq);
 
 void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb);
 void sdp_add_sock(struct sdp_sock *ssk);
 void sdp_remove_sock(struct sdp_sock *ssk);
-void sdp_remove_large_sock(void);
+void sdp_remove_large_sock(struct sdp_sock *ssk);
+int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size);
 void sdp_post_keepalive(struct sdp_sock *ssk);
 void sdp_start_keepalive_timer(struct sock *sk);
diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
index 41ae38e0da8c..feccc31157cc 100644
--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
@@ -70,9 +70,13 @@ static int curr_large_sockets = 0;
 atomic_t sdp_current_mem_usage;
 spinlock_t sdp_large_sockets_lock;
 
-static int sdp_can_resize(void)
+static int sdp_get_large_socket(struct sdp_sock *ssk)
 {
 	int count, ret;
+
+	if (ssk->recv_request)
+		return 1;
+
 	spin_lock_irq(&sdp_large_sockets_lock);
 	count = curr_large_sockets;
 	ret = curr_large_sockets < max_large_sockets;
@@ -83,11 +87,13 @@ static int sdp_can_resize(void)
 	return ret;
 }
 
-void sdp_remove_large_sock(void)
+void sdp_remove_large_sock(struct sdp_sock *ssk)
 {
-	spin_lock_irq(&sdp_large_sockets_lock);
-	curr_large_sockets--;
-	spin_unlock_irq(&sdp_large_sockets_lock);
+	if (ssk->recv_frags) {
+		spin_lock_irq(&sdp_large_sockets_lock);
+		curr_large_sockets--;
+		spin_unlock_irq(&sdp_large_sockets_lock);
+	}
 }
 
 /* Like tcp_fin */
@@ -443,7 +449,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 		/* FIXME */
 		BUG_ON(!skb);
 		resp_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *resp_size);
-		resp_size->size = htons(ssk->recv_frags * PAGE_SIZE);
+		resp_size->size = htonl(ssk->recv_frags * PAGE_SIZE);
 		sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF_ACK);
 	}
 
@@ -470,7 +476,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 		ssk->sent_request = SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE;
 		ssk->sent_request_head = ssk->tx_head;
 		req_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *req_size);
-		req_size->size = htons(ssk->sent_request);
+		req_size->size = htonl(ssk->sent_request);
 		sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF);
 	}
 
@@ -506,11 +512,42 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 	}
 }
 
-static inline void sdp_resize(struct sdp_sock *ssk, u32 new_size)
+int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size)
 {
-	ssk->recv_frags = PAGE_ALIGN(new_size - SDP_HEAD_SIZE) / PAGE_SIZE;
-	if (ssk->recv_frags > SDP_MAX_SEND_SKB_FRAGS)
-		ssk->recv_frags = SDP_MAX_SEND_SKB_FRAGS;
+	u32 curr_size = SDP_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE;
+	u32 max_size = SDP_HEAD_SIZE + SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE;
+
+	if (new_size > curr_size && new_size <= max_size &&
+	    sdp_get_large_socket(ssk)) {
+		ssk->rcvbuf_scale = rcvbuf_scale;
+		ssk->recv_frags = PAGE_ALIGN(new_size - SDP_HEAD_SIZE) / PAGE_SIZE;
+		if (ssk->recv_frags > SDP_MAX_SEND_SKB_FRAGS)
+			ssk->recv_frags = SDP_MAX_SEND_SKB_FRAGS;
+		return 0;
+	} else
+		return -1;
+}
+
+static void sdp_handle_resize_request(struct sdp_sock *ssk, struct sdp_chrecvbuf *buf)
+{
+	if (sdp_resize_buffers(ssk, ntohl(buf->size)) == 0)
+		ssk->recv_request_head = ssk->rx_head + 1;
+	else
+		ssk->recv_request_head = ssk->rx_tail;
+	ssk->recv_request = 1;
+}
+
+static void sdp_handle_resize_ack(struct sdp_sock *ssk, struct sdp_chrecvbuf *buf)
+{
+	u32 new_size = ntohl(buf->size);
+
+	if (new_size > ssk->xmit_size_goal) {
+		ssk->sent_request = -1;
+		ssk->xmit_size_goal = new_size;
+		ssk->send_frags =
+			PAGE_ALIGN(ssk->xmit_size_goal) / PAGE_SIZE;
+	} else
+		ssk->sent_request = 0;
 }
 
 static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
@@ -590,28 +627,10 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
 			sdp_sock_queue_rcv_skb(&ssk->isk.sk, skb);
 			sdp_fin(&ssk->isk.sk);
 		} else if (h->mid == SDP_MID_CHRCVBUF) {
-			u32 new_size = *(u32 *)skb->data;
-
-			if (ssk->recv_request || sdp_can_resize()) {
-				ssk->rcvbuf_scale = rcvbuf_scale;
-				sdp_resize(ssk, ntohs(new_size));
-				ssk->recv_request_head = ssk->rx_head + 1;
-			} else
-				ssk->recv_request_head = ssk->rx_tail;
-			ssk->recv_request = 1;
+			sdp_handle_resize_request(ssk, (struct sdp_chrecvbuf *)skb->data);
 			__kfree_skb(skb);
 		} else if (h->mid == SDP_MID_CHRCVBUF_ACK) {
-			u32 new_size = *(u32 *)skb->data;
-			new_size = ntohs(new_size);
-
-			if (new_size > ssk->xmit_size_goal) {
-				ssk->sent_request = -1;
-				ssk->xmit_size_goal = new_size;
-				ssk->send_frags =
-					PAGE_ALIGN(ssk->xmit_size_goal) /
-					PAGE_SIZE;
-			} else
-				ssk->sent_request = 0;
+			sdp_handle_resize_ack(ssk, (struct sdp_chrecvbuf *)skb->data);
 			__kfree_skb(skb);
 		} else {
 			/* TODO: Handle other messages */
diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
index 8da10298a5d3..5e1480ec17d9 100644
--- a/drivers/infiniband/ulp/sdp/sdp_cma.c
+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
@@ -239,6 +239,7 @@ int sdp_connect_handler(struct sock *sk, struct rdma_cm_id *id,
 		sizeof(struct sdp_bsdh);
 	sdp_sk(child)->send_frags = PAGE_ALIGN(sdp_sk(child)->xmit_size_goal) /
 		PAGE_SIZE;
+	sdp_resize_buffers(sdp_sk(child), ntohl(h->desremrcvsz));
 
 	sdp_dbg(child, "%s bufs %d xmit_size_goal %d\n", __func__,
 		sdp_sk(child)->bufs,
diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
index 0cbeccba00c9..8ef98b2a56c1 100644
--- a/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -224,8 +224,7 @@ static void sdp_destroy_qp(struct sdp_sock *ssk)
 	if (pd)
 		ib_dealloc_pd(pd);
 
-	if (ssk->recv_frags)
-		sdp_remove_large_sock();
+	sdp_remove_large_sock(ssk);
 
 	kfree(ssk->rx_ring);
 	kfree(ssk->tx_ring);
-- 
2.50.1