From: Dotan Barak
Date: Tue, 3 Jul 2012 07:35:01 +0000 (+0300)
Subject: sdp: prepare support to kernel 2.6.39-200.1.1.el5uek: add macro to get sk_sleep
X-Git-Tag: v4.1.12-92~264^2~5^2~5
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=330d2ef4063e0d91947642154234c5d00c69eac3;p=users%2Fjedix%2Flinux-maple.git

sdp: prepare support to kernel 2.6.39-200.1.1.el5uek: add macro to get sk_sleep

This will ease porting to the new kernel.

Signed-off-by: Dotan Barak
---

diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 76dea5de01fe7..5fba626375630 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -49,6 +49,7 @@
 #define sdp_inet_daddr(sk) inet_sk(sk)->daddr
 #define sdp_inet_rcv_saddr(sk) inet_sk(sk)->rcv_saddr
 
+#define sdp_sk_sleep(sk) (sk)->sk_sleep
 #define sk_ssk(ssk) ((struct sock *)ssk)
 
 /* Interval between sucessive polls in the Tx routine when polling is used
diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
index 2d6463468ff88..afb2d102fe03c 100644
--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
@@ -168,7 +168,7 @@ void sdp_nagle_timeout(unsigned long data)
         ssk->nagle_last_unacked = 0;
         sdp_post_sends(ssk, GFP_ATOMIC);
 
-        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+        if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk)))
                 sk_stream_write_space(sk);
 out:
         bh_unlock_sock(sk);
diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
index 3d29c576bc1a4..bd2ff731c8990 100644
--- a/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -1013,7 +1013,7 @@ static int sdp_wait_for_connect(struct sock *sk, long timeo)
          * having to remove and re-insert us on the wait queue.
          */
         for (;;) {
-                prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+                prepare_to_wait_exclusive(sdp_sk_sleep(sk), &wait,
                                           TASK_INTERRUPTIBLE);
                 release_sock(sk);
                 if (list_empty(&ssk->accept_queue)) {
@@ -1033,7 +1033,7 @@ static int sdp_wait_for_connect(struct sock *sk, long timeo)
                 if (!timeo)
                         break;
         }
-        finish_wait(sk->sk_sleep, &wait);
+        finish_wait(sdp_sk_sleep(sk), &wait);
         sdp_dbg(sk, "%s returns %d\n", __func__, err);
         return err;
 }
@@ -1755,7 +1755,7 @@ int sdp_tx_wait_memory(struct sdp_sock *ssk, long *timeo_p, int *credits_needed)
         while (1) {
                 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
-                prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+                prepare_to_wait(sdp_sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
                 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                         goto do_error;
@@ -1823,7 +1823,7 @@ int sdp_tx_wait_memory(struct sdp_sock *ssk, long *timeo_p, int *credits_needed)
                 *timeo_p = current_timeo;
         }
 out:
-        finish_wait(sk->sk_sleep, &wait);
+        finish_wait(sdp_sk_sleep(sk), &wait);
         return err;
 
 do_error:
diff --git a/drivers/infiniband/ulp/sdp/sdp_rx.c b/drivers/infiniband/ulp/sdp/sdp_rx.c
index 8ec2f5d2df345..7b9d032662b21 100644
--- a/drivers/infiniband/ulp/sdp/sdp_rx.c
+++ b/drivers/infiniband/ulp/sdp/sdp_rx.c
@@ -335,7 +335,7 @@ static inline struct sk_buff *sdp_sock_queue_rcv_skb(struct sock *sk,
                         sdp_dbg_data(sk_ssk(ssk), "got RX SrcAvail while waiting "
                                 "for TX SrcAvail. waking up TX SrcAvail"
                                 "to be aborted\n");
-                        wake_up(sk->sk_sleep);
+                        wake_up(sdp_sk_sleep(sk));
                 }
 
         atomic_add(skb->len, &ssk->rcv_nxt);
@@ -596,7 +596,7 @@ static int sdp_process_rx_skb(struct sdp_sock *ssk, struct sk_buff *skb)
                 ssk->sa_cancel_mseq = ntohl(h->mseq);
                 ssk->sa_cancel_arrived = 1;
                 if (ssk->rx_sa)
-                        wake_up(sk->sk_sleep);
+                        wake_up(sdp_sk_sleep(sk));
 
                 skb_queue_tail(&ssk->rx_ctl_q, skb);
         } else if (h->mid == SDP_MID_RDMARDCOMPL) {
@@ -691,8 +691,8 @@ static void sdp_bzcopy_write_space(struct sdp_sock *ssk)
         clear_bit(SOCK_NOSPACE, &sock->flags);
         sdp_prf1(sk, NULL, "Waking up sleepers");
 
-        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-                wake_up_interruptible(sk->sk_sleep);
+        if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk)))
+                wake_up_interruptible(sdp_sk_sleep(sk));
         if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
                 sock_wake_async(sock, 2, POLL_OUT);
 }
@@ -810,7 +810,7 @@ void sdp_do_posts(struct sdp_sock *ssk)
 
 static inline int should_wake_up(struct sock *sk)
 {
-        return sk->sk_sleep && waitqueue_active(sk->sk_sleep) &&
+        return sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk)) &&
                 (posts_handler(sdp_sk(sk)) || somebody_is_waiting(sk));
 }
 
@@ -829,7 +829,7 @@ static void sdp_rx_irq(struct ib_cq *cq, void *cq_context)
         sdp_prf(sk, NULL, "rx irq");
 
         if (should_wake_up(sk)) {
-                wake_up_interruptible(sk->sk_sleep);
+                wake_up_interruptible(sdp_sk_sleep(sk));
                 SDPSTATS_COUNTER_INC(rx_int_wake_up);
         } else {
                 if (queue_work_on(ssk->cpu, rx_comp_wq, &ssk->rx_comp_work))
diff --git a/drivers/infiniband/ulp/sdp/sdp_tx.c b/drivers/infiniband/ulp/sdp/sdp_tx.c
index 55aa14e451d8a..bde3ec626a807 100644
--- a/drivers/infiniband/ulp/sdp/sdp_tx.c
+++ b/drivers/infiniband/ulp/sdp/sdp_tx.c
@@ -248,7 +248,7 @@ static inline void sdp_process_tx_wc(struct sdp_sock *ssk, struct ib_wc *wc)
                                 "probably was canceled already\n");
                 }
 
-                wake_up(sk->sk_sleep);
+                wake_up(sdp_sk_sleep(sk));
         } else {
                 /* Keepalive probe sent cleanup */
                 sdp_cnt(sdp_keepalive_probes_sent);
@@ -316,14 +316,14 @@ static int sdp_tx_handler_select(struct sdp_sock *ssk)
 
         if (sk->sk_write_pending) {
                 /* Do the TX posts from sender context */
-                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
+                if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk))) {
                         sdp_prf1(sk, NULL, "Waking up pending sendmsg");
-                        wake_up_interruptible(sk->sk_sleep);
+                        wake_up_interruptible(sdp_sk_sleep(sk));
                         return 0;
                 } else
                         sdp_prf1(sk, NULL, "Unexpected: sk_sleep=%p, "
                                 "waitqueue_active: %d\n",
-                                sk->sk_sleep, waitqueue_active(sk->sk_sleep));
+                                sdp_sk_sleep(sk), waitqueue_active(sdp_sk_sleep(sk)));
         }
 
         if (posts_handler(ssk)) {
diff --git a/drivers/infiniband/ulp/sdp/sdp_zcopy.c b/drivers/infiniband/ulp/sdp/sdp_zcopy.c
index 15c7acaf6484c..6abbca160bba2 100644
--- a/drivers/infiniband/ulp/sdp/sdp_zcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_zcopy.c
@@ -148,7 +148,7 @@ static int sdp_wait_rdmardcompl(struct sdp_sock *ssk, long *timeo_p,
         sdp_dbg_data(sk, "sleep till RdmaRdCompl. timeo = %ld.\n", *timeo_p);
         sdp_prf1(sk, NULL, "Going to sleep");
         while (ssk->qp_active) {
-                prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+                prepare_to_wait(sdp_sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
                 if (unlikely(!*timeo_p)) {
                         err = -ETIME;
@@ -204,7 +204,7 @@ static int sdp_wait_rdmardcompl(struct sdp_sock *ssk, long *timeo_p,
                 *timeo_p = current_timeo;
         }
 
-        finish_wait(sk->sk_sleep, &wait);
+        finish_wait(sdp_sk_sleep(sk), &wait);
 
         sdp_dbg_data(sk, "Finished waiting - RdmaRdCompl: %d/%d bytes, flags: 0x%x\n",
                         tx_sa->bytes_acked, tx_sa->bytes_sent, tx_sa->abort_flags);
@@ -225,7 +225,7 @@ static int sdp_wait_rdma_wr_finished(struct sdp_sock *ssk)
         sdp_dbg_data(sk, "Sleep till RDMA wr finished.\n");
 
         while (1) {
-                prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+                prepare_to_wait(sdp_sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 
                 if (!ssk->tx_ring.rdma_inflight->busy) {
                         sdp_dbg_data(sk, "got rdma cqe\n");
@@ -259,7 +259,7 @@ static int sdp_wait_rdma_wr_finished(struct sdp_sock *ssk)
                 posts_handler_get(ssk);
         }
 
-        finish_wait(sk->sk_sleep, &wait);
+        finish_wait(sdp_sk_sleep(sk), &wait);
 
         sdp_dbg_data(sk, "Finished waiting\n");
         return rc;
@@ -337,7 +337,7 @@ void sdp_handle_sendsm(struct sdp_sock *ssk, u32 mseq_ack)
         sdp_dbg_data(sk, "Got SendSM - aborting SrcAvail\n");
         ssk->tx_sa->abort_flags |= TX_SA_SENDSM;
 
-        wake_up(sk->sk_sleep);
+        wake_up(sdp_sk_sleep(sk));
         sdp_dbg_data(sk, "woke up sleepers\n");
 
 out:
@@ -369,7 +369,7 @@ void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
 
         ssk->tx_sa->bytes_acked += bytes_completed;
 
-        wake_up(sk->sk_sleep);
+        wake_up(sdp_sk_sleep(sk));
         sdp_dbg_data(sk, "woke up sleepers\n");
 
 out:
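
A possible follow-up, sketched here only as an illustration and not part of this patch: with every call site now going through sdp_sk_sleep(), the macro in sdp.h becomes the single place to touch when porting to a kernel where the sk->sk_sleep field has been removed in favour of the sk_sleep() accessor. The version cut-off used below (2.6.35, where mainline made that switch) is an assumption for the sketch; the el5uek kernel may need a different test.

    #include <linux/version.h>
    #include <net/sock.h>

    /*
     * Older kernels expose the wait queue head directly as sk->sk_sleep;
     * newer ones reach it through the sk_sleep() helper. Either branch
     * yields a wait_queue_head_t *, so the call sites stay unchanged.
     */
    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
    #define sdp_sk_sleep(sk) (sk)->sk_sleep
    #else
    #define sdp_sk_sleep(sk) sk_sleep(sk)
    #endif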