struct list_head accept_queue;
struct list_head backlog_queue;
struct sock *parent;
- /* rdma specific */
- struct rdma_cm_id *id;
- struct ib_qp *qp;
- struct ib_cq *cq;
- struct ib_mr *mr;
- struct device *dma_device;
+
+ struct work_struct work;
+ wait_queue_head_t wq;
+
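+ /* deferred time-wait expiry and final destruction; destroy_work is queued on sdp_workqueue */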
+ struct work_struct time_wait_work;
+ struct work_struct destroy_work;
+
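+ /* non-zero while the socket is in time-wait; cleared by sdp_time_wait_destroy_sk() */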
+ int time_wait;
+
/* Like tcp_sock */
__u16 urg_data;
int offset; /* like seq in tcp */
int xmit_size_goal;
int write_seq;
int pushed_seq;
- int nonagle;
- /* SDP specific */
- wait_queue_head_t wq;
-
- struct work_struct time_wait_work;
- struct work_struct destroy_work;
+ int nonagle;
- int time_wait;
+ /* Data below will be reset on error */
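+ /* (sdp_reset_sk() zeroes every field from ->id to the end of the struct) */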
+ /* rdma specific */
+ struct rdma_cm_id *id;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct ib_mr *mr;
+ struct device *dma_device;
- spinlock_t lock;
+ /* SDP specific */
struct sdp_buf *rx_ring;
struct ib_recv_wr rx_wr;
unsigned rx_head;
int remote_credits;
- spinlock_t tx_lock;
struct sdp_buf *tx_ring;
unsigned tx_head;
unsigned tx_tail;
struct ib_sge ibsge[SDP_MAX_SEND_SKB_FRAGS + 1];
struct ib_wc ibwc[SDP_NUM_WC];
- struct work_struct work;
};
extern struct proto sdp_proto;
extern struct workqueue_struct *sdp_workqueue;
int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *);
-void sdp_close_sk(struct sock *sk);
+void sdp_reset_sk(struct sock *sk, int rc);
+void sdp_time_wait_destroy_sk(struct sdp_sock *ssk);
void sdp_completion_handler(struct ib_cq *cq, void *cq_context);
void sdp_work(void *);
void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid);
sdp_dbg(sk, "%s\n", __func__);
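+ /* start the rings empty: head == tail means nothing is outstanding */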
+ sdp_sk(sk)->tx_head = 1;
+ sdp_sk(sk)->tx_tail = 1;
+ sdp_sk(sk)->rx_head = 1;
+ sdp_sk(sk)->rx_tail = 1;
+
sdp_sk(sk)->tx_ring = kmalloc(sizeof *sdp_sk(sk)->tx_ring * SDP_TX_SIZE,
GFP_KERNEL);
if (!sdp_sk(sk)->tx_ring) {
sdp_sk(sk)->id = NULL;
id->qp = NULL;
id->context = NULL;
- sdp_set_error(sk, rc);
parent = sdp_sk(sk)->parent;
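+ /* a socket still owned by a listening parent is cleaned up through the parent; reset orphans only */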
+ if (!parent)
+ sdp_reset_sk(sk, rc);
}
release_sock(sk);
done:
release_sock(parent);
sk_common_release(child);
- } else if (rc) {
}
return rc;
}
return 0;
}
-/* TODO: linger? */
-void sdp_close_sk(struct sock *sk)
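+/* Destroy the QP, release the PD and free both rings; shared by the reset and close paths. */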
+static void sdp_destroy_qp(struct sdp_sock *ssk)
{
- struct sdp_sock *ssk = sdp_sk(sk);
- struct rdma_cm_id *id = NULL;
struct ib_pd *pd = NULL;
struct ib_cq *cq = NULL;
- sdp_dbg(sk, "%s\n", __func__);
-
- lock_sock(sk);
-
- sk->sk_send_head = NULL;
- skb_queue_purge(&sk->sk_write_queue);
- /*
- * If sendmsg cached page exists, toss it.
- */
- if (sk->sk_sndmsg_page) {
- __free_page(sk->sk_sndmsg_page);
- sk->sk_sndmsg_page = NULL;
- }
-
- id = ssk->id;
- if (ssk->id) {
- id->qp = NULL;
- ssk->id = NULL;
- release_sock(sk);
- rdma_destroy_id(id);
- lock_sock(sk);
- }
-
if (ssk->qp) {
pd = ssk->qp->pd;
cq = ssk->cq;
- sdp_sk(sk)->cq = NULL;
+ ssk->cq = NULL;
ib_destroy_qp(ssk->qp);
while (ssk->rx_head != ssk->rx_tail) {
if (pd)
ib_dealloc_pd(pd);
+ kfree(ssk->rx_ring);
+ kfree(ssk->tx_ring);
+}
+
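+/*
+ * Reset a socket after a transport error: flush pending completions,
+ * record the error (unless receive is already shut down) and drop all
+ * RDMA resources.
+ */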
+void sdp_reset_sk(struct sock *sk, int rc)
+{
+ struct sdp_sock *ssk = sdp_sk(sk);
+
+ sdp_dbg(sk, "%s\n", __func__);
+
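+ /* reap whatever is still sitting on the CQ before it is destroyed */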
+ if (ssk->cq)
+ sdp_poll_cq(ssk, ssk->cq);
+
+ if (!(sk->sk_shutdown & RCV_SHUTDOWN))
+ sdp_set_error(sk, rc);
+
+ sdp_destroy_qp(ssk);
+
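+ /* zero the "reset on error" portion of struct sdp_sock: all fields from ->id onward */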
+ memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id));
+
+ if (ssk->time_wait) {
+ sdp_dbg(sk, "%s: destroy in time wait state\n", __func__);
+ sdp_time_wait_destroy_sk(ssk);
+ }
+}
+
+/* TODO: linger? */
+static void sdp_close_sk(struct sock *sk)
+{
+ struct sdp_sock *ssk = sdp_sk(sk);
+ struct rdma_cm_id *id = NULL;
+ sdp_dbg(sk, "%s\n", __func__);
+
+ lock_sock(sk);
+
+ sk->sk_send_head = NULL;
+ skb_queue_purge(&sk->sk_write_queue);
+ /*
+ * If sendmsg cached page exists, toss it.
+ */
+ if (sk->sk_sndmsg_page) {
+ __free_page(sk->sk_sndmsg_page);
+ sk->sk_sndmsg_page = NULL;
+ }
+
+ id = ssk->id;
+ if (ssk->id) {
+ id->qp = NULL;
+ ssk->id = NULL;
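+ /* rdma_destroy_id() waits for CM callbacks, which can take the socket lock, so drop it across the call */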
+ release_sock(sk);
+ rdma_destroy_id(id);
+ lock_sock(sk);
+ }
+
skb_queue_purge(&sk->sk_receive_queue);
- kfree(sdp_sk(sk)->rx_ring);
- kfree(sdp_sk(sk)->tx_ring);
+ sdp_destroy_qp(ssk);
sdp_dbg(sk, "%s done; releasing sock\n", __func__);
release_sock(sk);
sock_put(data);
}
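+/* Leave time-wait: mark the socket closed and defer the final destruction to sdp_workqueue. */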
+void sdp_time_wait_destroy_sk(struct sdp_sock *ssk)
+{
+ ssk->time_wait = 0;
+ ssk->isk.sk.sk_state = TCP_CLOSE;
+ queue_work(sdp_workqueue, &ssk->destroy_work);
+}
+
static int sdp_init_sock(struct sock *sk)
{
struct sdp_sock *ssk = sdp_sk(sk);
INIT_WORK(&ssk->time_wait_work, sdp_time_wait_work, sk);
INIT_WORK(&ssk->destroy_work, sdp_destroy_work, sk);
- ssk->tx_head = 1;
- ssk->tx_tail = 1;
- ssk->rx_head = 1;
- ssk->rx_tail = 1;
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_NO_CSUM;
return 0;
}