u64 mapping[SDP_MAX_SEND_SKB_FRAGS + 1];
};
+
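+/*
+ * Per-socket TX ring state: buffer holds the posted skbs, indexed by
+ * mseq & (SDP_TX_SIZE - 1); head and tail are free-running counters;
+ * una_seq advances as send completions come back; credits is the number
+ * of send credits currently granted by the peer.
+ */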
+struct sdp_tx_ring {
+ struct sdp_buf *buffer;
+ unsigned head;
+ unsigned tail;
+
+ int una_seq;
+ unsigned credits;
+ u16 poll_cnt;
+};
+
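+/*
+ * head - tail is the number of sends posted but not yet completed, so
+ * the return value is how many ring slots are still free; unsigned
+ * arithmetic keeps this correct when the counters wrap.
+ */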
+static inline int sdp_tx_ring_slots_left(struct sdp_tx_ring *tx_ring)
+{
+ return SDP_TX_SIZE - (tx_ring->head - tx_ring->tail);
+}
+
struct sdp_sock {
/* sk has to be the first member of inet_sock */
struct inet_sock isk;
u32 rcv_nxt;
int write_seq;
- int snd_una;
int pushed_seq;
int xmit_size_goal;
int nonagle;
int sdp_disconnect;
int destruct_in_process;
- struct sdp_buf *rx_ring;
- struct sdp_buf *tx_ring;
+
- /* rdma specific */
- struct ib_qp *qp;
- struct ib_cq *cq;
- struct ib_mr *mr;
/* Data below will be reset on error */
struct rdma_cm_id *id;
struct ib_device *ib_device;
unsigned rx_head;
unsigned rx_tail;
unsigned mseq_ack;
- unsigned tx_credits;
unsigned max_bufs; /* Initial buffers offered by other side */
unsigned min_bufs; /* Low water mark to wake senders */
int remote_credits;
int poll_cq;
- unsigned tx_head;
- unsigned tx_tail;
+ /* rdma specific */
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct ib_mr *mr;
+
+ struct sdp_buf *rx_ring;
+ struct sdp_tx_ring tx_ring;
struct ib_send_wr tx_wr;
/* SDP slow start */
int zcopy_thresh;
struct ib_sge ibsge[SDP_MAX_SEND_SKB_FRAGS + 1];
- struct ib_wc ibwc[SDP_NUM_WC];
};
/* Context used for synchronous zero copy bcopy (BZCOPY) */
extern int rcvbuf_initial_size;
extern struct proto sdp_proto;
-extern struct workqueue_struct *sdp_workqueue;
+extern struct workqueue_struct *comp_wq;
extern atomic_t sdp_current_mem_usage;
extern spinlock_t sdp_large_sockets_lock;
sk->sk_error_report(sk);
}
-extern struct workqueue_struct *sdp_workqueue;
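+/* Ask for a completion event on the next completion of this socket's CQ */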
+static inline void sdp_arm_cq(struct sock *sk)
+{
+ sdp_dbg_data(sk, "ib_req_notify_cq on cq\n");
+
+ ib_req_notify_cq(sdp_sk(sk)->cq, IB_CQ_NEXT_COMP);
+}
#ifdef CONFIG_INFINIBAND_SDP_DEBUG_DATA
void dump_packet(struct sock *sk, char *str, struct sk_buff *skb, const struct sdp_bsdh *h);
{
struct sdp_buf *tx_req;
struct sdp_bsdh *h = (struct sdp_bsdh *)skb_push(skb, sizeof *h);
- unsigned mseq = ssk->tx_head;
+ unsigned mseq = ssk->tx_ring.head;
int i, rc, frags;
u64 addr;
struct ib_device *dev;
h->mseq_ack = htonl(ssk->mseq_ack);
SDP_DUMP_PACKET(&ssk->isk.sk, "TX", skb, h);
- tx_req = &ssk->tx_ring[mseq & (SDP_TX_SIZE - 1)];
+ tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
tx_req->skb = skb;
dev = ssk->ib_device;
sge = ssk->ibsge;
}
ssk->tx_wr.next = NULL;
- ssk->tx_wr.wr_id = ssk->tx_head | SDP_OP_SEND;
+ ssk->tx_wr.wr_id = ssk->tx_ring.head | SDP_OP_SEND;
ssk->tx_wr.sg_list = ssk->ibsge;
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
last_send = jiffies;
}
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
- ++ssk->tx_head;
- --ssk->tx_credits;
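+ /* Each posted send consumes one ring slot and one peer credit */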
+ ++ssk->tx_ring.head;
+ --ssk->tx_ring.credits;
ssk->remote_credits = ssk->rx_head - ssk->rx_tail;
if (unlikely(rc)) {
sdp_dbg(&ssk->isk.sk, "ib_post_send failed with status %d.\n", rc);
{
struct ib_device *dev;
struct sdp_buf *tx_req;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct bzcopy_state *bz;
int i, frags;
-
- if (unlikely(mseq != ssk->tx_tail)) {
+ struct sdp_tx_ring *tx_ring = &ssk->tx_ring;
+ if (unlikely(mseq != tx_ring->tail)) {
printk(KERN_WARNING "Bogus send completion id %d tail %d\n",
- mseq, ssk->tx_tail);
- return NULL;
+ mseq, tx_ring->tail);
+ goto out;
}
dev = ssk->ib_device;
- tx_req = &ssk->tx_ring[mseq & (SDP_TX_SIZE - 1)];
+ tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];
skb = tx_req->skb;
ib_dma_unmap_single(dev, tx_req->mapping[0], skb->len - skb->data_len,
DMA_TO_DEVICE);
DMA_TO_DEVICE);
}
- ssk->snd_una += TCP_SKB_CB(skb)->end_seq;
- ++ssk->tx_tail;
+ tx_ring->una_seq += TCP_SKB_CB(skb)->end_seq;
/* TODO: AIO and real zcopy code; add their context support here */
bz = BZCOPY_STATE(skb);
if (bz)
bz->busy--;
+ ++tx_ring->tail;
+
+out:
return skb;
}
return (ssk->nonagle & TCP_NAGLE_OFF) ||
skb->next != (struct sk_buff *)&ssk->isk.sk.sk_write_queue ||
skb->len + sizeof(struct sdp_bsdh) >= ssk->xmit_size_goal ||
- (ssk->tx_tail == ssk->tx_head &&
+ (ssk->tx_ring.tail == ssk->tx_ring.head &&
!(ssk->nonagle & TCP_NAGLE_CORK)) ||
(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_PSH);
}
int sdp_post_credits(struct sdp_sock *ssk)
{
- if (likely(ssk->tx_credits > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+ if (likely(ssk->tx_ring.credits > 1) &&
+ likely(sdp_tx_ring_slots_left(&ssk->tx_ring))) {
struct sk_buff *skb;
skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sizeof(struct sdp_bsdh),
if (ssk->recv_request &&
ssk->rx_tail >= ssk->recv_request_head &&
- ssk->tx_credits >= SDP_MIN_TX_CREDITS &&
- ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
+ ssk->tx_ring.credits >= SDP_MIN_TX_CREDITS &&
+ ssk->tx_ring.head - ssk->tx_ring.tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF_ACK);
}
- while (ssk->tx_credits > SDP_MIN_TX_CREDITS &&
- ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE &&
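+ /* Data is queued and ring slots are free, but peer credits are at or
+ * below the minimum: record the missed send. */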
+ if (ssk->tx_ring.credits <= SDP_MIN_TX_CREDITS &&
+ sdp_tx_ring_slots_left(&ssk->tx_ring) &&
+ (skb = ssk->isk.sk.sk_send_head) &&
+ sdp_nagle_off(ssk, skb)) {
+ SDPSTATS_COUNTER_INC(send_miss_no_credits);
+ }
+
+ sdp_dbg_data(&ssk->isk.sk, "credits: %d tx ring slots left: %d send_head: %p\n",
+ ssk->tx_ring.credits, sdp_tx_ring_slots_left(&ssk->tx_ring),
+ ssk->isk.sk.sk_send_head);
+
+ while (ssk->tx_ring.credits > SDP_MIN_TX_CREDITS &&
+ sdp_tx_ring_slots_left(&ssk->tx_ring) &&
(skb = ssk->isk.sk.sk_send_head) &&
sdp_nagle_off(ssk, skb)) {
update_send_head(&ssk->isk.sk, skb);
sdp_post_send(ssk, skb, SDP_MID_DATA);
}
- if (ssk->tx_credits == SDP_MIN_TX_CREDITS &&
+ if (ssk->tx_ring.credits == SDP_MIN_TX_CREDITS &&
!ssk->sent_request &&
- ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
- ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
+ ssk->tx_ring.head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
+ ssk->tx_ring.head - ssk->tx_ring.tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sizeof(struct sdp_bsdh) +
/* FIXME */
BUG_ON(!skb);
ssk->sent_request = SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE;
- ssk->sent_request_head = ssk->tx_head;
+ ssk->sent_request_head = ssk->tx_ring.head;
req_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *req_size);
req_size->size = htonl(ssk->sent_request);
sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF);
c *= 2;
if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- likely(ssk->tx_credits > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ likely(ssk->tx_ring.credits > 1) &&
+ likely(ssk->tx_ring.head - ssk->tx_ring.tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sdp_post_send(ssk, skb, SDP_MID_DATA);
}
+ /* Send DisConn if needed.
+ * Do not send DisConn when only one credit remains, per CA4-82:
+ * if one credit is available, an implementation shall only send SDP
+ * messages that provide additional credits and also do not contain ULP
+ * payload. */
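+ /* The comparison on the right evaluates to 0 or 1: a single credit is
+ * enough only when the DisConn itself advertises new credits, i.e. when
+ * remote_credits is below the number of posted receive buffers. */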
if (unlikely(ssk->sdp_disconnect) &&
!ssk->isk.sk.sk_send_head &&
- ssk->tx_credits > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ ssk->tx_ring.credits > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sizeof(struct sdp_bsdh),
printk(KERN_WARNING "SDP BUG! mseq %d != wrid %d\n",
ssk->mseq_ack, (int)wc->wr_id);
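+ /* Credits left = receive buffers just advertised by the peer minus the
+ * sends we have posted past the sequence number being acked. */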
- SDPSTATS_HIST_LINEAR(credits_before_update, ssk->tx_credits);
- ssk->tx_credits = ntohl(h->mseq_ack) - ssk->tx_head + 1 +
+ SDPSTATS_HIST_LINEAR(credits_before_update, ssk->tx_ring.credits);
+ ssk->tx_ring.credits = ntohl(h->mseq_ack) - ssk->tx_ring.head + 1 +
ntohs(h->bufs);
frags = skb_shinfo(skb)->nr_frags;
sdp_set_error(sk, -ECONNRESET);
wake_up(&ssk->wq);
- queue_work(sdp_workqueue, &ssk->destroy_work);
+ queue_work(comp_wq, &ssk->destroy_work);
}
goto out;
}
int sdp_poll_cq(struct sdp_sock *ssk, struct ib_cq *cq)
{
+ struct ib_wc ibwc[SDP_NUM_WC];
int n, i;
int ret = -EAGAIN;
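+ /* Drain completions in batches of SDP_NUM_WC; a partial batch means
+ * the CQ is empty. */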
do {
- n = ib_poll_cq(cq, SDP_NUM_WC, ssk->ibwc);
+ n = ib_poll_cq(cq, SDP_NUM_WC, ibwc);
for (i = 0; i < n; ++i) {
- sdp_handle_wc(ssk, ssk->ibwc + i);
+ sdp_handle_wc(ssk, &ibwc[i]);
ret = 0;
}
} while (n == SDP_NUM_WC);
struct sock *sk = &ssk->isk.sk;
sdp_post_recvs(ssk);
+
+ /* update credits */
sdp_post_sends(ssk, 0);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
sk_stream_write_space(&ssk->isk.sk);
+ } else {
+ SDPSTATS_COUNTER_INC(rx_poll_miss);
}
return ret;
cq = ssk->cq;
if (unlikely(!cq))
goto out;
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ sdp_arm_cq(sk);
sdp_poll_cq(ssk, cq);
out:
release_sock(sk);
sdp_dbg(sk, "%s\n", __func__);
- sdp_sk(sk)->tx_head = 1;
- sdp_sk(sk)->tx_tail = 1;
+ sdp_sk(sk)->tx_ring.head = 1;
+ sdp_sk(sk)->tx_ring.tail = 1;
sdp_sk(sk)->rx_head = 1;
sdp_sk(sk)->rx_tail = 1;
- sdp_sk(sk)->tx_ring = kmalloc(sizeof *sdp_sk(sk)->tx_ring * SDP_TX_SIZE,
- GFP_KERNEL);
- if (!sdp_sk(sk)->tx_ring) {
+ sdp_sk(sk)->tx_ring.buffer = kmalloc(sizeof(*sdp_sk(sk)->tx_ring.buffer) *
+ (SDP_TX_SIZE + 1), GFP_KERNEL);
+ if (!sdp_sk(sk)->tx_ring.buffer) {
rc = -ENOMEM;
sdp_warn(sk, "Unable to allocate TX Ring size %zd.\n",
- sizeof *sdp_sk(sk)->tx_ring * SDP_TX_SIZE);
+ sizeof(*sdp_sk(sk)->tx_ring.buffer) * (SDP_TX_SIZE + 1));
goto err_tx;
}
kfree(sdp_sk(sk)->rx_ring);
sdp_sk(sk)->rx_ring = NULL;
err_rx:
- kfree(sdp_sk(sk)->tx_ring);
- sdp_sk(sk)->tx_ring = NULL;
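+ /* No sends should still be in flight when the TX ring is freed */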
+ WARN_ON(sdp_sk(sk)->tx_ring.head != sdp_sk(sk)->tx_ring.tail);
+ kfree(sdp_sk(sk)->tx_ring.buffer);
+ sdp_sk(sk)->tx_ring.buffer = NULL;
err_tx:
return rc;
}
sdp_add_sock(sdp_sk(child));
- sdp_sk(child)->max_bufs = sdp_sk(child)->tx_credits = ntohs(h->bsdh.bufs);
- sdp_sk(child)->min_bufs = sdp_sk(child)->tx_credits / 4;
+ sdp_sk(child)->max_bufs = sdp_sk(child)->tx_ring.credits = ntohs(h->bsdh.bufs);
+ sdp_sk(child)->min_bufs = sdp_sk(child)->tx_ring.credits / 4;
sdp_sk(child)->xmit_size_goal = ntohl(h->localrcvsz) -
sizeof(struct sdp_bsdh);
sdp_sk(child)->send_frags = PAGE_ALIGN(sdp_sk(child)->xmit_size_goal) /
PAGE_SIZE;
sdp_init_buffers(sdp_sk(child), rcvbuf_initial_size);
-
sdp_dbg(child, "%s recv_frags: %d tx credits %d xmit_size_goal %d send trigger %d\n",
__func__,
sdp_sk(child)->recv_frags,
- sdp_sk(child)->tx_credits,
+ sdp_sk(child)->tx_ring.credits,
sdp_sk(child)->xmit_size_goal,
sdp_sk(child)->min_bufs);
h = event->param.conn.private_data;
SDP_DUMP_PACKET(sk, "RX", NULL, &h->bsdh);
- sdp_sk(sk)->max_bufs = sdp_sk(sk)->tx_credits = ntohs(h->bsdh.bufs);
- sdp_sk(sk)->min_bufs = sdp_sk(sk)->tx_credits / 4;
+ sdp_sk(sk)->max_bufs = sdp_sk(sk)->tx_ring.credits = ntohs(h->bsdh.bufs);
+ sdp_sk(sk)->min_bufs = sdp_sk(sk)->tx_ring.credits / 4;
sdp_sk(sk)->xmit_size_goal = ntohl(h->actrcvsz) -
sizeof(struct sdp_bsdh);
sdp_sk(sk)->send_frags = MIN(PAGE_ALIGN(sdp_sk(sk)->xmit_size_goal) /
sdp_dbg(sk, "%s bufs %d xmit_size_goal %d send_frags: %d send trigger %d\n",
__func__,
- sdp_sk(sk)->tx_credits,
+ sdp_sk(sk)->tx_ring.credits,
sdp_sk(sk)->xmit_size_goal,
sdp_sk(sk)->send_frags,
sdp_sk(sk)->min_bufs);
sdp_sk(sk)->poll_cq = 1;
- ib_req_notify_cq(sdp_sk(sk)->cq, IB_CQ_NEXT_COMP);
+ sdp_arm_cq(sk);
sdp_poll_cq(sdp_sk(sk), sdp_sk(sk)->cq);
sk->sk_state_change(sk);
module_param_named(sdp_zcopy_thresh, sdp_zcopy_thresh, int, 0644);
MODULE_PARM_DESC(sdp_zcopy_thresh, "Zero copy send threshold; 0=off.");
-struct workqueue_struct *sdp_workqueue;
+struct workqueue_struct *comp_wq;
struct list_head sock_list;
spinlock_t sock_list_lock;
cq = ssk->cq;
ssk->cq = NULL;
ib_destroy_qp(ssk->qp);
+ ssk->qp = NULL;
while (ssk->rx_head != ssk->rx_tail) {
struct sk_buff *skb;
atomic_sub(SDP_MAX_SEND_SKB_FRAGS, &sdp_current_mem_usage);
__kfree_skb(skb);
}
- while (ssk->tx_head != ssk->tx_tail) {
+ while (ssk->tx_ring.head != ssk->tx_ring.tail) {
struct sk_buff *skb;
- skb = sdp_send_completion(ssk, ssk->tx_tail);
+ skb = sdp_send_completion(ssk, ssk->tx_ring.tail);
if (!skb)
break;
__kfree_skb(skb);
if (cq)
ib_destroy_cq(cq);
- if (ssk->mr)
+ if (ssk->mr) {
ib_dereg_mr(ssk->mr);
+ ssk->mr = NULL;
+ }
if (pd)
ib_dealloc_pd(pd);
kfree(ssk->rx_ring);
ssk->rx_ring = NULL;
}
- if (ssk->tx_ring) {
- kfree(ssk->tx_ring);
- ssk->tx_ring = NULL;
+ if (ssk->tx_ring.buffer) {
+ kfree(ssk->tx_ring.buffer);
+ ssk->tx_ring.buffer = NULL;
}
}
-
static void sdp_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
struct sdp_sock *ssk = sdp_sk(sk);
sdp_dbg(sk, "%s\n", __func__);
- ssk->keepalive_tx_head = ssk->tx_head;
+ ssk->keepalive_tx_head = ssk->tx_ring.head;
ssk->keepalive_rx_head = ssk->rx_head;
sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
sk->sk_state == TCP_CLOSE)
goto out;
- if (ssk->keepalive_tx_head == ssk->tx_head &&
+ if (ssk->keepalive_tx_head == ssk->tx_ring.head &&
ssk->keepalive_rx_head == ssk->rx_head)
sdp_post_keepalive(ssk);
if (!(sk->sk_shutdown & RCV_SHUTDOWN) || !sk_stream_memory_free(sk))
sdp_set_error(sk, rc);
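+ /* Tear down the QP before the memset below wipes the rdma-specific
+ * fields (qp, cq, mr) of the socket. */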
+ sdp_destroy_qp(ssk);
+
memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id));
sk->sk_state_change(sk);
/* Don't destroy socket before destroy work does its job */
sock_hold(sk, SOCK_REF_RESET);
- queue_work(sdp_workqueue, &ssk->destroy_work);
+ queue_work(comp_wq, &ssk->destroy_work);
read_unlock(&device_removal_lock);
}
/* Like inet_csk_accept */
static struct sock *sdp_accept(struct sock *sk, int flags, int *err)
{
- struct sdp_sock *newssk, *ssk;
+ struct sdp_sock *newssk = NULL, *ssk;
struct sock *newsk;
int error;
if (newssk->cq) {
sdp_dbg(newsk, "%s: ib_req_notify_cq\n", __func__);
newssk->poll_cq = 1;
- ib_req_notify_cq(newssk->cq, IB_CQ_NEXT_COMP);
+ sdp_arm_cq(&newssk->isk.sk);
sdp_poll_cq(newssk, newssk->cq);
}
release_sock(newsk);
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
- answ = ssk->write_seq - ssk->snd_una;
+ answ = ssk->write_seq - ssk->tx_ring.una_seq;
break;
default:
return -ENOIOCTLCMD;
{
sdp_dbg(&ssk->isk.sk, "Starting dreq wait timeout\n");
- queue_delayed_work(sdp_workqueue, &ssk->dreq_wait_work, timeo);
+ queue_delayed_work(comp_wq, &ssk->dreq_wait_work, timeo);
ssk->dreq_wait_timeout = 1;
}
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_NO_CSUM;
ssk->rx_ring = NULL;
- ssk->tx_ring = NULL;
+ ssk->tx_ring.buffer = NULL;
ssk->sdp_disconnect = 0;
ssk->destructed_already = 0;
ssk->destruct_in_process = 0;
{
int min_free;
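+ /* Writable space is bounded by both the peer's credits and the free
+ * slots in the local TX ring */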
- min_free = MIN(ssk->tx_credits, SDP_TX_SIZE - (ssk->tx_head - ssk->tx_tail));
+ min_free = MIN(ssk->tx_ring.credits,
+ SDP_TX_SIZE - (ssk->tx_ring.head - ssk->tx_ring.tail));
if (min_free < SDP_MIN_TX_CREDITS)
return 0;
struct sock *sk = &ssk->isk.sk;
struct socket *sock = sk->sk_socket;
- if (ssk->tx_credits >= ssk->min_bufs &&
- ssk->tx_head == ssk->tx_tail &&
+ if (ssk->tx_ring.credits >= ssk->min_bufs &&
+ ssk->tx_ring.head == ssk->tx_ring.tail &&
sock != NULL) {
clear_bit(SOCK_NOSPACE, &sock->flags);
static int __init sdp_init(void)
{
- int rc;
+ int rc = -ENOMEM;
INIT_LIST_HEAD(&sock_list);
spin_lock_init(&sock_list_lock);
spin_lock_init(&sdp_large_sockets_lock);
sockets_allocated = kmalloc(sizeof(*sockets_allocated), GFP_KERNEL);
+ if (!sockets_allocated)
+ goto no_mem_sockets_allocated;
+
orphan_count = kmalloc(sizeof(*orphan_count), GFP_KERNEL);
+ if (!orphan_count)
+ goto no_mem_orphan_count;
+
percpu_counter_init(sockets_allocated, 0);
percpu_counter_init(orphan_count, 0);
sdp_proto.sockets_allocated = sockets_allocated;
sdp_proto.orphan_count = orphan_count;
-
- sdp_workqueue = create_singlethread_workqueue("sdp");
- if (!sdp_workqueue) {
- return -ENOMEM;
- }
+ comp_wq = create_singlethread_workqueue("comp_wq");
+ if (!comp_wq)
+ goto no_mem_rx_wq;
rc = proto_register(&sdp_proto, 1);
- if (rc) {
- printk(KERN_WARNING "%s: proto_register failed: %d\n", __func__, rc);
- destroy_workqueue(sdp_workqueue);
- return rc;
- }
+ if (rc)
+ goto error_proto_reg;
rc = sock_register(&sdp_net_proto);
- if (rc) {
- printk(KERN_WARNING "%s: sock_register failed: %d\n", __func__, rc);
- proto_unregister(&sdp_proto);
- destroy_workqueue(sdp_workqueue);
- return rc;
- }
+ if (rc)
+ goto error_sock_reg;
sdp_proc_init();
ib_register_client(&sdp_client);
return 0;
+
+error_sock_reg:
+ proto_unregister(&sdp_proto);
+error_proto_reg:
+ destroy_workqueue(comp_wq);
+no_mem_rx_wq:
+ kfree(orphan_count);
+no_mem_orphan_count:
+ kfree(sockets_allocated);
+no_mem_sockets_allocated:
+ return rc;
}
static void __exit sdp_exit(void)
if (percpu_counter_read_positive(orphan_count))
printk(KERN_WARNING "%s: orphan_count %lld\n", __func__,
percpu_counter_read_positive(orphan_count));
- destroy_workqueue(sdp_workqueue);
+
+ destroy_workqueue(comp_wq);
+
flush_scheduled_work();
BUG_ON(!list_empty(&sock_list));
uid = sock_i_uid(sk);
inode = sock_i_ino(sk);
rx_queue = sdp_sk(sk)->rcv_nxt - sdp_sk(sk)->copied_seq;
- tx_queue = sdp_sk(sk)->write_seq - sdp_sk(sk)->snd_una;
+ tx_queue = sdp_sk(sk)->write_seq - sdp_sk(sk)->tx_ring.una_seq;
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %5d %lu %08X:%08X %X",
st->num, src, srcp, dest, destp, uid, inode,