This will ease porting to newer kernels.
Signed-off-by: Dotan Barak <dotanb@dev.mellanox.co.il>
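
For reference only (not part of this patch): once every access goes through the
new macro, adapting to a kernel where the wait queue is no longer a direct
struct sock member should only require a single conditional definition. The
version cut-off below and the availability of the sk_sleep() accessor are
assumptions that would have to be verified against the target kernel:

    #include <linux/version.h>

    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    /* newer kernels: wait queue is reached via the sk_sleep() accessor */
    #define sdp_sk_sleep(sk) sk_sleep(sk)
    #else
    /* older kernels: wait queue is still a direct struct sock member */
    #define sdp_sk_sleep(sk) ((sk)->sk_sleep)
    #endif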
#define sdp_inet_daddr(sk) inet_sk(sk)->daddr
#define sdp_inet_rcv_saddr(sk) inet_sk(sk)->rcv_saddr
+#define sdp_sk_sleep(sk) (sk)->sk_sleep
#define sk_ssk(ssk) ((struct sock *)ssk)
/* Interval between successive polls in the Tx routine when polling is used
ssk->nagle_last_unacked = 0;
sdp_post_sends(ssk, GFP_ATOMIC);
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk)))
sk_stream_write_space(sk);
out:
bh_unlock_sock(sk);
* having to remove and re-insert us on the wait queue.
*/
for (;;) {
- prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ prepare_to_wait_exclusive(sdp_sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
if (list_empty(&ssk->accept_queue)) {
if (!timeo)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sdp_sk_sleep(sk), &wait);
sdp_dbg(sk, "%s returns %d\n", __func__, err);
return err;
}
while (1) {
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sdp_sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
*timeo_p = current_timeo;
}
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sdp_sk_sleep(sk), &wait);
return err;
do_error:
sdp_dbg_data(sk_ssk(ssk), "got RX SrcAvail while waiting "
"for TX SrcAvail. waking up TX SrcAvail"
"to be aborted\n");
- wake_up(sk->sk_sleep);
+ wake_up(sdp_sk_sleep(sk));
}
atomic_add(skb->len, &ssk->rcv_nxt);
ssk->sa_cancel_mseq = ntohl(h->mseq);
ssk->sa_cancel_arrived = 1;
if (ssk->rx_sa)
- wake_up(sk->sk_sleep);
+ wake_up(sdp_sk_sleep(sk));
skb_queue_tail(&ssk->rx_ctl_q, skb);
} else if (h->mid == SDP_MID_RDMARDCOMPL) {
clear_bit(SOCK_NOSPACE, &sock->flags);
sdp_prf1(sk, NULL, "Waking up sleepers");
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk)))
+ wake_up_interruptible(sdp_sk_sleep(sk));
if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, 2, POLL_OUT);
}
static inline int should_wake_up(struct sock *sk)
{
- return sk->sk_sleep && waitqueue_active(sk->sk_sleep) &&
+ return sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk)) &&
(posts_handler(sdp_sk(sk)) || somebody_is_waiting(sk));
}
sdp_prf(sk, NULL, "rx irq");
if (should_wake_up(sk)) {
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sdp_sk_sleep(sk));
SDPSTATS_COUNTER_INC(rx_int_wake_up);
} else {
if (queue_work_on(ssk->cpu, rx_comp_wq, &ssk->rx_comp_work))
"probably was canceled already\n");
}
- wake_up(sk->sk_sleep);
+ wake_up(sdp_sk_sleep(sk));
} else {
/* Keepalive probe sent cleanup */
sdp_cnt(sdp_keepalive_probes_sent);
if (sk->sk_write_pending) {
/* Do the TX posts from sender context */
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
+ if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk))) {
sdp_prf1(sk, NULL, "Waking up pending sendmsg");
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sdp_sk_sleep(sk));
return 0;
} else
sdp_prf1(sk, NULL, "Unexpected: sk_sleep=%p, "
"waitqueue_active: %d\n",
- sk->sk_sleep, waitqueue_active(sk->sk_sleep));
+ sdp_sk_sleep(sk), waitqueue_active(sdp_sk_sleep(sk)));
}
if (posts_handler(ssk)) {
sdp_dbg_data(sk, "sleep till RdmaRdCompl. timeo = %ld.\n", *timeo_p);
sdp_prf1(sk, NULL, "Going to sleep");
while (ssk->qp_active) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sdp_sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (unlikely(!*timeo_p)) {
err = -ETIME;
*timeo_p = current_timeo;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sdp_sk_sleep(sk), &wait);
sdp_dbg_data(sk, "Finished waiting - RdmaRdCompl: %d/%d bytes, flags: 0x%x\n",
tx_sa->bytes_acked, tx_sa->bytes_sent, tx_sa->abort_flags);
sdp_dbg_data(sk, "Sleep till RDMA wr finished.\n");
while (1) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sdp_sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
if (!ssk->tx_ring.rdma_inflight->busy) {
sdp_dbg_data(sk, "got rdma cqe\n");
posts_handler_get(ssk);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sdp_sk_sleep(sk), &wait);
sdp_dbg_data(sk, "Finished waiting\n");
return rc;
sdp_dbg_data(sk, "Got SendSM - aborting SrcAvail\n");
ssk->tx_sa->abort_flags |= TX_SA_SENDSM;
- wake_up(sk->sk_sleep);
+ wake_up(sdp_sk_sleep(sk));
sdp_dbg_data(sk, "woke up sleepers\n");
out:
ssk->tx_sa->bytes_acked += bytes_completed;
- wake_up(sk->sk_sleep);
+ wake_up(sdp_sk_sleep(sk));
sdp_dbg_data(sk, "woke up sleepers\n");
out: