return (struct sdp_sock *)sk;
}
+/* Single funnel point for socket state changes: every former direct
+ * "sk->sk_state = X" assignment in this driver is converted to call
+ * this helper, so future instrumentation of transitions has one hook. */
+static inline void sdp_set_state(struct sock *sk, int state)
+{
+ sk->sk_state = state;
+}
+
static inline void sdp_set_error(struct sock *sk, int err)
{
sk->sk_err = -err;
if (sk->sk_socket)
sk->sk_socket->state = SS_UNCONNECTED;
- sk->sk_state = TCP_CLOSE;
+ sdp_set_state(sk, TCP_CLOSE);
if (sdp_sk(sk)->time_wait) {
sdp_dbg(sk, "%s: destroy in time wait state\n", __func__);
sk->sk_error_report(sk);
}
-static inline void sdp_set_state(struct sock *sk, int state)
-{
- sk->sk_state = state;
-}
-
extern struct workqueue_struct *sdp_workqueue;
int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *);
BUG_ON(!skb);
sdp_post_send(ssk, skb, SDP_MID_DISCONN);
if (ssk->isk.sk.sk_state == TCP_FIN_WAIT1)
- ssk->isk.sk.sk_state = TCP_FIN_WAIT2;
+ sdp_set_state(&ssk->isk.sk, TCP_FIN_WAIT2);
else
- ssk->isk.sk.sk_state = TCP_CLOSING;
+ sdp_set_state(&ssk->isk.sk, TCP_CLOSING);
}
}
list_add_tail(&sdp_sk(child)->backlog_queue, &sdp_sk(sk)->backlog_queue);
sdp_sk(child)->parent = sk;
- child->sk_state = TCP_SYN_RECV;
+ sdp_set_state(child, TCP_SYN_RECV);
/* child->sk_write_space(child); */
/* child->sk_data_ready(child, 0); */
struct sockaddr_in *dst_addr;
sdp_dbg(sk, "%s\n", __func__);
- sk->sk_state = TCP_ESTABLISHED;
+ sdp_set_state(sk, TCP_ESTABLISHED);
if (sock_flag(sk, SOCK_KEEPOPEN))
sdp_start_keepalive_timer(sk);
parent = sdp_sk(sk)->parent;
BUG_ON(!parent);
- sk->sk_state = TCP_ESTABLISHED;
+ sdp_set_state(sk, TCP_ESTABLISHED);
if (sock_flag(sk, SOCK_KEEPOPEN))
sdp_start_keepalive_timer(sk);
return 0;
if (sk->sk_state == TCP_ESTABLISHED)
- sk->sk_state = TCP_FIN_WAIT1;
+ sdp_set_state(sk, TCP_FIN_WAIT1);
else if (sk->sk_state == TCP_CLOSE_WAIT)
- sk->sk_state = TCP_LAST_ACK;
+ sdp_set_state(sk, TCP_LAST_ACK);
else
return 0;
return 1;
if (sk->sk_state == TCP_FIN_WAIT2 &&
!sk->sk_send_head &&
sdp_sk(sk)->tx_head == sdp_sk(sk)->tx_tail) {
- sk->sk_state = TCP_CLOSE;
+ sdp_set_state(sk, TCP_CLOSE);
}
if ((1 << sk->sk_state) & (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2)) {
return rc;
}
- sk->sk_state = TCP_SYN_SENT;
+ sdp_set_state(sk, TCP_SYN_SENT);
return 0;
}
sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt));
- sk->sk_state = TCP_CLOSE;
+ sdp_set_state(sk, TCP_CLOSE);
sdp_sk(sk)->time_wait = 0;
release_sock(sk);
void sdp_time_wait_destroy_sk(struct sdp_sock *ssk)
{
+ /* Leave TIME_WAIT: clear the flag, mark the socket TCP_CLOSE, and
+  * defer actual teardown to sdp_workqueue via ssk->destroy_work. */
ssk->time_wait = 0;
- ssk->isk.sk.sk_state = TCP_CLOSE;
+ sdp_set_state(&ssk->isk.sk, TCP_CLOSE);
queue_work(sdp_workqueue, &ssk->destroy_work);
}
return;
if (sk->sk_state == TCP_ESTABLISHED)
- sk->sk_state = TCP_FIN_WAIT1;
+ sdp_set_state(sk, TCP_FIN_WAIT1);
else if (sk->sk_state == TCP_CLOSE_WAIT)
- sk->sk_state = TCP_LAST_ACK;
+ sdp_set_state(sk, TCP_LAST_ACK);
else
return;
sdp_warn(sk, "rdma_listen failed: %d\n", rc);
sdp_set_error(sk, rc);
} else
- sk->sk_state = TCP_LISTEN;
+ sdp_set_state(sk, TCP_LISTEN);
return rc;
}