struct work_struct work;
wait_queue_head_t wq;
- struct work_struct time_wait_work;
+ struct delayed_work time_wait_work;
struct work_struct destroy_work;
/* Like tcp_sock */
void sdp_reset_sk(struct sock *sk, int rc);
void sdp_time_wait_destroy_sk(struct sdp_sock *ssk);
void sdp_completion_handler(struct ib_cq *cq, void *cq_context);
-void sdp_work(void *);
+void sdp_work(struct work_struct *work);
void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid);
void sdp_post_recvs(struct sdp_sock *ssk);
int sdp_poll_cq(struct sdp_sock *ssk, struct ib_cq *cq);
void sdp_post_sends(struct sdp_sock *ssk, int nonagle);
-void sdp_destroy_work(void *data);
-void sdp_time_wait_work(void *data);
+void sdp_destroy_work(struct work_struct *work);
+/* work_func_t takes a struct work_struct * even for delayed work; the
+ * handler recovers the delayed_work container via container_of(). */
+void sdp_time_wait_work(struct work_struct *work);
struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id);
struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq);
void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb);
return ret;
}
-void sdp_work(void *data)
+void sdp_work(struct work_struct *work)
{
- struct sock *sk = (struct sock *)data;
- struct sdp_sock *ssk = sdp_sk(sk);
+ struct sdp_sock *ssk = container_of(work, struct sdp_sock, work);
+ struct sock *sk = &ssk->isk.sk;
struct ib_cq *cq;
sdp_dbg_data(sk, "%s\n", __func__);
}
sdp_sk(sk)->mr = mr;
- INIT_WORK(&sdp_sk(sk)->work, sdp_work, sdp_sk(sk));
+ INIT_WORK(&sdp_sk(sk)->work, sdp_work);
cq = ib_create_cq(device, sdp_completion_handler, sdp_cq_event_handler,
sk, SDP_TX_SIZE + SDP_RX_SIZE);
{
struct sockaddr_in *dst_addr;
struct sock *child;
- struct sdp_hh *h;
+ const struct sdp_hh *h;
int rc;
sdp_dbg(sk, "%s %p -> %p\n", __func__, sdp_sk(sk)->id, id);
- h = event->private_data;
+ h = event->param.conn.private_data;
if (!h->max_adverts)
return -EINVAL;
INIT_LIST_HEAD(&sdp_sk(child)->accept_queue);
INIT_LIST_HEAD(&sdp_sk(child)->backlog_queue);
- INIT_WORK(&sdp_sk(child)->time_wait_work, sdp_time_wait_work, child);
- INIT_WORK(&sdp_sk(child)->destroy_work, sdp_destroy_work, child);
+ INIT_DELAYED_WORK(&sdp_sk(child)->time_wait_work, sdp_time_wait_work);
+ INIT_WORK(&sdp_sk(child)->destroy_work, sdp_destroy_work);
dst_addr = (struct sockaddr_in *)&id->route.addr.dst_addr;
inet_sk(child)->dport = dst_addr->sin_port;
static int sdp_response_handler(struct sock *sk, struct rdma_cm_id *id,
struct rdma_cm_event *event)
{
- struct sdp_hah *h;
+ const struct sdp_hah *h;
struct sockaddr_in *dst_addr;
sdp_dbg(sk, "%s\n", __func__);
if (sock_flag(sk, SOCK_DEAD))
return 0;
- h = event->private_data;
+ h = event->param.conn.private_data;
sdp_sk(sk)->bufs = ntohs(h->bsdh.bufs);
sdp_sk(sk)->xmit_size_goal = ntohl(h->actrcvsz) -
sizeof(struct sdp_bsdh);
return put_user(answ, (int __user *)arg);
}
-void sdp_destroy_work(void *data)
+void sdp_destroy_work(struct work_struct *work)
{
- struct sock *sk = data;
+ /* Recover the owning sdp_sock/sock from the embedded work item. */
+ struct sdp_sock *ssk = container_of(work, struct sdp_sock, destroy_work);
+ struct sock *sk = &ssk->isk.sk;
sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt));
cancel_delayed_work(&sdp_sk(sk)->time_wait_work);
sock_put(sk);
}
-void sdp_time_wait_work(void *data)
+void sdp_time_wait_work(struct work_struct *work)
{
- struct sock *sk = data;
+ /* Delayed-work handlers are still passed the embedded work_struct, so
+  * step over the delayed_work's ".work" member to reach the sdp_sock. */
+ struct sdp_sock *ssk = container_of(work, struct sdp_sock, time_wait_work.work);
+ struct sock *sk = &ssk->isk.sk;
lock_sock(sk);
sdp_dbg(sk, "%s\n", __func__);
release_sock(sk);
atomic_dec(sk->sk_prot->orphan_count);
- sock_put(data);
+ sock_put(sk);
}
void sdp_time_wait_destroy_sk(struct sdp_sock *ssk)
INIT_LIST_HEAD(&ssk->accept_queue);
INIT_LIST_HEAD(&ssk->backlog_queue);
- INIT_WORK(&ssk->time_wait_work, sdp_time_wait_work, sk);
- INIT_WORK(&ssk->destroy_work, sdp_destroy_work, sk);
+ INIT_DELAYED_WORK(&ssk->time_wait_work, sdp_time_wait_work);
+ INIT_WORK(&ssk->destroy_work, sdp_destroy_work);
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_NO_CSUM;
return 0;