From 361583a43d1ab477d222e9440c3c7da30dddea38 Mon Sep 17 00:00:00 2001
From: Amir Vadai
Date: Tue, 24 Nov 2009 11:01:05 +0200
Subject: [PATCH] sdp: fixed BUG1826 part 1 - schedule while atomic

Allocate skbs according to the calling context: pass the required gfp
flags explicitly to the skb allocation helpers, so that callers running
in atomic context (nagle timer, TX completion) use GFP_ATOMIC, while
process-context callers keep falling back to sk->sk_allocation or
GFP_KERNEL.

Signed-off-by: Amir Vadai
---
 drivers/infiniband/ulp/sdp/sdp.h       | 46 ++++++++++++++------------
 drivers/infiniband/ulp/sdp/sdp_bcopy.c | 16 +++------
 drivers/infiniband/ulp/sdp/sdp_main.c  |  2 +-
 drivers/infiniband/ulp/sdp/sdp_tx.c    |  2 +-
 drivers/infiniband/ulp/sdp/sdp_zcopy.c | 10 +++---
 5 files changed, 37 insertions(+), 39 deletions(-)

diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 930d68cf6bbb..a1de85b535aa 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -571,16 +571,18 @@ static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size,
 	return NULL;
 }
 
-static inline struct sk_buff *sdp_alloc_skb(struct sock *sk, u8 mid, int size)
+static inline struct sk_buff *sdp_alloc_skb(struct sock *sk, u8 mid, int size,
+		gfp_t gfp)
 {
 	struct sdp_bsdh *h;
 	struct sk_buff *skb;
-	gfp_t gfp;
 
-	if (unlikely(sk->sk_allocation))
-		gfp = sk->sk_allocation;
-	else
-		gfp = GFP_KERNEL;
+	if (!gfp) {
+		if (unlikely(sk->sk_allocation))
+			gfp = sk->sk_allocation;
+		else
+			gfp = GFP_KERNEL;
+	}
 
 	skb = sdp_stream_alloc_skb(sk, sizeof(struct sdp_bsdh) + size, gfp);
 	BUG_ON(!skb);
@@ -594,23 +596,24 @@ static inline struct sk_buff *sdp_alloc_skb(struct sock *sk, u8 mid, int size)
 	return skb;
 }
 
-static inline struct sk_buff *sdp_alloc_skb_data(struct sock *sk)
+static inline struct sk_buff *sdp_alloc_skb_data(struct sock *sk, gfp_t gfp)
 {
-	return sdp_alloc_skb(sk, SDP_MID_DATA, 0);
+	return sdp_alloc_skb(sk, SDP_MID_DATA, 0, gfp);
 }
 
-static inline struct sk_buff *sdp_alloc_skb_disconnect(struct sock *sk)
+static inline struct sk_buff *sdp_alloc_skb_disconnect(struct sock *sk,
+		gfp_t gfp)
 {
-	return sdp_alloc_skb(sk, SDP_MID_DISCONN, 0);
+	return sdp_alloc_skb(sk, SDP_MID_DISCONN, 0, gfp);
 }
 
 static inline struct sk_buff *sdp_alloc_skb_chrcvbuf_ack(struct sock *sk,
-		int size)
+		int size, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct sdp_chrecvbuf *resp_size;
 
-	skb = sdp_alloc_skb(sk, SDP_MID_CHRCVBUF_ACK, sizeof(*resp_size));
+	skb = sdp_alloc_skb(sk, SDP_MID_CHRCVBUF_ACK, sizeof(*resp_size), gfp);
 
 	resp_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *resp_size);
 	resp_size->size = htonl(size);
@@ -619,12 +622,12 @@ static inline struct sk_buff *sdp_alloc_skb_chrcvbuf_ack(struct sock *sk,
 }
 
 static inline struct sk_buff *sdp_alloc_skb_srcavail(struct sock *sk,
-		u32 len, u32 rkey, u64 vaddr)
+		u32 len, u32 rkey, u64 vaddr, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct sdp_srcah *srcah;
 
-	skb = sdp_alloc_skb(sk, SDP_MID_SRCAVAIL, sizeof(*srcah));
+	skb = sdp_alloc_skb(sk, SDP_MID_SRCAVAIL, sizeof(*srcah), gfp);
 
 	srcah = (struct sdp_srcah *)skb_put(skb, sizeof(*srcah));
 	srcah->len = htonl(len);
@@ -634,18 +637,19 @@ static inline struct sk_buff *sdp_alloc_skb_srcavail(struct sock *sk,
 	return skb;
 }
 
-static inline struct sk_buff *sdp_alloc_skb_srcavail_cancel(struct sock *sk)
+static inline struct sk_buff *sdp_alloc_skb_srcavail_cancel(struct sock *sk,
+		gfp_t gfp)
 {
-	return sdp_alloc_skb(sk, SDP_MID_SRCAVAIL_CANCEL, 0);
+	return sdp_alloc_skb(sk, SDP_MID_SRCAVAIL_CANCEL, 0, gfp);
 }
 
 static inline struct sk_buff *sdp_alloc_skb_rdmardcompl(struct sock *sk,
-		u32 len)
+		u32 len, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct sdp_rrch *rrch;
 
-	skb = sdp_alloc_skb(sk, SDP_MID_RDMARDCOMPL, sizeof(*rrch));
+	skb = sdp_alloc_skb(sk, SDP_MID_RDMARDCOMPL, sizeof(*rrch), gfp);
 
 	rrch = (struct sdp_rrch *)skb_put(skb, sizeof(*rrch));
 	rrch->len = htonl(len);
@@ -653,9 +657,9 @@ static inline struct sk_buff *sdp_alloc_skb_rdmardcompl(struct sock *sk,
 	return skb;
 }
 
-static inline struct sk_buff *sdp_alloc_skb_sendsm(struct sock *sk)
+static inline struct sk_buff *sdp_alloc_skb_sendsm(struct sock *sk, gfp_t gfp)
 {
-	return sdp_alloc_skb(sk, SDP_MID_SENDSM, 0);
+	return sdp_alloc_skb(sk, SDP_MID_SENDSM, 0, gfp);
 }
 
 static inline int sdp_tx_ring_slots_left(struct sdp_sock *ssk) {
@@ -742,7 +746,7 @@ int sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device);
 void sdp_tx_ring_destroy(struct sdp_sock *ssk);
 int sdp_xmit_poll(struct sdp_sock *ssk, int force);
 void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb);
-void sdp_post_sends(struct sdp_sock *ssk, int nonagle);
+void sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp);
 void sdp_nagle_timeout(unsigned long data);
 void sdp_post_keepalive(struct sdp_sock *ssk);
 
diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
index 6c1ac11452a9..24f900887334 100644
--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
@@ -164,7 +164,7 @@ void sdp_nagle_timeout(unsigned long data)
 
 	}
 	ssk->nagle_last_unacked = 0;
-	sdp_post_sends(ssk, 0);
+	sdp_post_sends(ssk, GFP_ATOMIC);
 
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 		sk_stream_write_space(&ssk->isk.sk);
@@ -175,12 +175,11 @@ out2:
 	mod_timer(&ssk->nagle_timer, jiffies + SDP_NAGLE_TIMEOUT);
 }
 
-void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+void sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp)
 {
 	/* TODO: nonagle? */
 	struct sk_buff *skb;
 	int c;
-	gfp_t gfp_page;
 	int post_count = 0;
 	struct sock *sk = &ssk->isk.sk;
 
@@ -194,11 +193,6 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 		return;
 	}
 
-	if (unlikely(ssk->isk.sk.sk_allocation))
-		gfp_page = ssk->isk.sk.sk_allocation;
-	else
-		gfp_page = GFP_KERNEL;
-
 	if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2) {
 		int wc_processed = sdp_xmit_poll(ssk, 1);
 		sdp_dbg_data(&ssk->isk.sk, "freed %d\n", wc_processed);
@@ -211,7 +205,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 		ssk->recv_request = 0;
 
 		skb = sdp_alloc_skb_chrcvbuf_ack(sk,
-				ssk->recv_frags * PAGE_SIZE);
+				ssk->recv_frags * PAGE_SIZE, gfp);
 
 		sdp_post_send(ssk, skb);
 		post_count++;
@@ -246,7 +240,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 	    likely((1 << ssk->isk.sk.sk_state) &
 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
 
-		skb = sdp_alloc_skb_data(&ssk->isk.sk);
+		skb = sdp_alloc_skb_data(&ssk->isk.sk, gfp);
 		sdp_post_send(ssk, skb);
 
 		SDPSTATS_COUNTER_INC(post_send_credits);
@@ -263,7 +257,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 			tx_credits(ssk) > 1) {
 		ssk->sdp_disconnect = 0;
 
-		skb = sdp_alloc_skb_disconnect(sk);
+		skb = sdp_alloc_skb_disconnect(sk, gfp);
 		sdp_post_send(ssk, skb);
 
 		post_count++;
diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
index 929609fca3db..0253a0d2e33f 100644
--- a/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -1805,7 +1805,7 @@ new_segment:
 				goto wait_for_sndbuf;
 			}
 
-			skb = sdp_alloc_skb_data(sk);
+			skb = sdp_alloc_skb_data(sk, 0);
 			if (!skb)
 				goto wait_for_memory;
 
diff --git a/drivers/infiniband/ulp/sdp/sdp_tx.c b/drivers/infiniband/ulp/sdp/sdp_tx.c
index 8ec612585281..386ab40f03aa 100644
--- a/drivers/infiniband/ulp/sdp/sdp_tx.c
+++ b/drivers/infiniband/ulp/sdp/sdp_tx.c
@@ -298,7 +298,7 @@ static int sdp_process_tx_cq(struct sdp_sock *ssk)
 	if (wc_processed) {
 		struct sock *sk = &ssk->isk.sk;
 
-		sdp_post_sends(ssk, 0);
+		sdp_post_sends(ssk, GFP_ATOMIC);
 		sdp_prf1(sk, NULL, "Waking sendmsg. inflight=%d",
 				(u32) tx_ring_posted(ssk));
 		sk_stream_write_space(&ssk->isk.sk);
diff --git a/drivers/infiniband/ulp/sdp/sdp_zcopy.c b/drivers/infiniband/ulp/sdp/sdp_zcopy.c
index 574a43928d5c..8d734ec4115e 100644
--- a/drivers/infiniband/ulp/sdp/sdp_zcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_zcopy.c
@@ -60,7 +60,7 @@ static int sdp_post_srcavail(struct sock *sk, struct tx_srcavail_state *tx_sa,
 
 	tx_sa->bytes_sent = tx_sa->bytes_acked = 0;
 
-	skb = sdp_alloc_skb_srcavail(sk, len, tx_sa->fmr->fmr->lkey, off);
+	skb = sdp_alloc_skb_srcavail(sk, len, tx_sa->fmr->fmr->lkey, off, 0);
 	if (!skb) {
 		return -ENOMEM;
 	}
@@ -102,10 +102,10 @@ static int sdp_post_srcavail_cancel(struct sock *sk)
 
 	sdp_warn(&ssk->isk.sk, "Posting srcavail cancel\n");
 
-	skb = sdp_alloc_skb_srcavail_cancel(sk);
+	skb = sdp_alloc_skb_srcavail_cancel(sk, 0);
 	skb_entail(sk, ssk, skb);
 
-	sdp_post_sends(ssk, 1);
+	sdp_post_sends(ssk, 0);
 
 	schedule_delayed_work(&ssk->srcavail_cancel_work,
 			SDP_SRCAVAIL_CANCEL_TIMEOUT);
@@ -285,7 +285,7 @@ int sdp_post_rdma_rd_compl(struct sdp_sock *ssk,
 	if (rx_sa->used <= rx_sa->reported)
 		return 0;
 
-	skb = sdp_alloc_skb_rdmardcompl(&ssk->isk.sk, copied);
+	skb = sdp_alloc_skb_rdmardcompl(&ssk->isk.sk, copied, 0);
 
 	rx_sa->reported += copied;
 
@@ -297,7 +297,7 @@ int sdp_post_rdma_rd_compl(struct sdp_sock *ssk,
 
 int sdp_post_sendsm(struct sock *sk)
 {
-	struct sk_buff *skb = sdp_alloc_skb_sendsm(sk);
+	struct sk_buff *skb = sdp_alloc_skb_sendsm(sk, 0);
 
 	sdp_post_send(sdp_sk(sk), skb);
 
-- 
2.50.1