#define SDPSTATS_ON
/* #define SDP_PROFILING */
-#define _sdp_printk(func, line, level, sk, format, arg...) \
+#define _sdp_printk(func, line, level, sk, format, arg...) do { \
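+ /* smp_processor_id() below must run with preemption disabled */ \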
+ preempt_disable(); \
printk(level "%s:%d sdp_sock(%5d:%d %d:%d): " format, \
func, line, \
current->pid, smp_processor_id(), \
(sk) ? inet_sk(sk)->num : -1, \
- (sk) ? ntohs(inet_sk(sk)->dport) : -1, ## arg)
+ (sk) ? ntohs(inet_sk(sk)->dport) : -1, ## arg); \
+ preempt_enable(); \
+} while (0)
#define sdp_printk(level, sk, format, arg...) \
_sdp_printk(__func__, __LINE__, level, sk, format, ## arg)
#define sdp_warn(sk, format, arg...) \
#define sdp_prf1(sk, s, format, arg...) ({ \
struct sdpprf_log *l = \
&sdpprf_log[sdpprf_log_count++ & (SDPPRF_LOG_SIZE - 1)]; \
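+ /* keep preemption disabled while the log slot is filled in */ \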
+ preempt_disable(); \
l->idx = sdpprf_log_count - 1; \
l->pid = current->pid; \
l->sk_num = (sk) ? inet_sk(sk)->num : -1; \
l->time = current_nsec(); \
l->func = __func__; \
l->line = __LINE__; \
+ preempt_enable(); \
1; \
})
#define sdp_prf(sk, s, format, arg...)
-EINVAL : 0;
}
- lock_sock(sk);
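+ /* may nest under another socket lock; tell lockdep the nesting is intentional */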
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sdp_dbg(sk, "%s event %d id %p\n", __func__, event->event, id);
if (!sdp_sk(sk)->id) {
sdp_dbg(sk, "socket is being torn down\n");
static void sdp_destroy_qp(struct sdp_sock *ssk)
{
struct ib_pd *pd = NULL;
- unsigned long flags;
sdp_dbg(&ssk->isk.sk, "destroying qp\n");
del_timer(&ssk->tx_ring.timer);
- rx_ring_lock(ssk, flags);
sdp_rx_ring_destroy(ssk);
sdp_tx_ring_destroy(ssk);
ib_dealloc_pd(pd);
sdp_remove_large_sock(ssk);
-
- rx_ring_unlock(ssk, flags);
-
}
static void sdp_reset_keepalive_timer(struct sock *sk, unsigned long len)
sdp_set_error(sk, rc);
}
- sdp_destroy_qp(ssk);
-
- memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id));
-
sk->sk_state_change(sk);
/* Don't destroy socket before destroy work does its job */
struct sock *sk = &ssk->isk.sk;
sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt));
+ sdp_destroy_qp(ssk);
+
+ memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id));
+
sdp_cancel_dreq_wait_timeout(ssk);
if (sk->sk_state == TCP_TIME_WAIT)
sdp_proc_unregister();
ib_unregister_client(&sdp_client);
+
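+ /* free the per-CPU storage before the counter structs themselves are freed */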
+ percpu_counter_destroy(sockets_allocated);
+ percpu_counter_destroy(orphan_count);
+
kfree(orphan_count);
kfree(sockets_allocated);
}
break;
case SDP_MID_CHRCVBUF:
sdp_dbg_data(sk, "Handling RX CHRCVBUF\n");
- sdp_handle_resize_request(ssk, (struct sdp_chrecvbuf *)h);
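+ /* the chrecvbuf payload follows the SDP header, not the header itself */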
+ sdp_handle_resize_request(ssk, (struct sdp_chrecvbuf *)(h+1));
__kfree_skb(skb);
break;
case SDP_MID_CHRCVBUF_ACK:
sdp_dbg_data(sk, "Handling RX CHRCVBUF_ACK\n");
- sdp_handle_resize_ack(ssk, (struct sdp_chrecvbuf *)h);
+ sdp_handle_resize_ack(ssk, (struct sdp_chrecvbuf *)(h+1));
__kfree_skb(skb);
break;
default:
{
struct ib_cq *rx_cq;
int rc = 0;
- unsigned long flags;
-
- rx_ring_lock(ssk, flags);
atomic_set(&ssk->rx_ring.head, 1);
atomic_set(&ssk->rx_ring.tail, 1);
ssk->rx_ring.buffer = kmalloc(
sizeof *ssk->rx_ring.buffer * SDP_RX_SIZE, GFP_KERNEL);
if (!ssk->rx_ring.buffer) {
- rc = -ENOMEM;
sdp_warn(&ssk->isk.sk,
"Unable to allocate RX Ring size %zd.\n",
sizeof(*ssk->rx_ring.buffer) * SDP_RX_SIZE);
- goto out;
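+ /* nothing to unwind yet, so fail directly */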
+ return -ENOMEM;
}
/* TODO: use vector=IB_CQ_VECTOR_LEAST_ATTACHED when implemented
sdp_arm_rx_cq(&ssk->isk.sk);
- goto out;
+ return 0;
err_cq:
kfree(ssk->rx_ring.buffer);
ssk->rx_ring.buffer = NULL;
-out:
- rx_ring_unlock(ssk, flags);
return rc;
}