#define SDP_OP_RDMA 0x200000000LL
#define SDP_OP_NOP 0x100000000LL
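+/*
+ * Socket history (debug builds only): each SDP socket keeps a small
+ * per-socket log of state transitions and refcount operations. Once
+ * all SDP_SOCK_HISTORY_LEN slots are used, further events are dropped
+ * but still counted in hst_idx, so the header line printed by
+ * sdp_print_history() reflects the overflow.
+ */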
+#ifdef SDP_SOCK_HISTORY
+#define SDP_SOCK_HISTORY_LEN 128
+#endif
+
/* how long (in jiffies) to block sender till tx completion */
#define SDP_BZCOPY_POLL_TIMEOUT (HZ / 10)
} while (0)
#define sdp_common_release(sk) do { \
- sdp_dbg(sk, "%s:%d - sock_put(" SOCK_REF_ALIVE \
+ sdp_dbg(sk, "%s:%d - sock_put(SOCK_REF_ALIVE" \
") - refcount = %d from withing sk_common_release\n",\
__func__, __LINE__, atomic_read(&(sk)->sk_refcnt));\
percpu_counter_inc((sk)->sk_prot->orphan_count);\
+ sdp_add_to_history(sk, "sdp_common_release"); \
+ _sdp_add_to_history(sk, "SOCK_REF_ALIVE", __func__, __LINE__, \
+ 2, SOCK_REF_ALIVE); \
sk_common_release(sk); \
} while (0)
int moder_time;
};
+#ifdef SDP_SOCK_HISTORY
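+/*
+ * How a history entry relates to the socket refcount: HOLD_REF and
+ * PUT_REF/__PUT_REF record a single sock_hold()/sock_put(); a PUT that
+ * directly follows its matching HOLD is merged into one BOTH_REF entry
+ * (see _sdp_add_to_history() below).
+ */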
+enum sdp_ref_type {
+ NOT_REF,
+ HOLD_REF,
+ PUT_REF,
+ __PUT_REF,
+ BOTH_REF
+};
+
+struct sdp_sock_hist {
+ char *str;
+ char *func;
+ int line;
+ int pid;
+ u8 cnt;
+ u8 ref_type; /* enum sdp_ref_type */
+ u8 ref_enum; /* enum sdp_ref */
+};
+#endif /* SDP_SOCK_HISTORY */
+
struct sdp_sock {
/* sk has to be the first member of inet_sock */
struct inet_sock isk;
struct sdp_device *sdp_dev;
int cpu;
+#ifdef SDP_SOCK_HISTORY
+ struct sdp_sock_hist hst[SDP_SOCK_HISTORY_LEN];
+ unsigned hst_idx; /* next free slot */
+ spinlock_t hst_lock;
+#endif /* SDP_SOCK_HISTORY */
+
int qp_active;
spinlock_t tx_sa_lock;
struct tx_srcavail_state *tx_sa;
return (struct sdp_sock *)sk;
}
+#ifdef SDP_SOCK_HISTORY
+static inline char *reftype2str(int reftype)
+{
+#define ENUM2STR(e) [e] = #e
+ static char *enum2str[] = {
+ ENUM2STR(NOT_REF),
+ ENUM2STR(HOLD_REF),
+ ENUM2STR(PUT_REF),
+ ENUM2STR(__PUT_REF),
+ ENUM2STR(BOTH_REF)
+ };
+
+ if (reftype < 0 || reftype >= ARRAY_SIZE(enum2str)) {
+ printk(KERN_WARNING "reftype %d is illegal\n", reftype);
+ return NULL;
+ }
+
+ return enum2str[reftype];
+}
+
+static inline void sdp_print_history(struct sock *sk)
+{
+ struct sdp_sock *ssk = sdp_sk(sk);
+ unsigned i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ssk->hst_lock, flags);
+
+ sdp_warn(sk, "############## %p %s %u/%lu ##############\n",
+ sk, sdp_state_str(sk->sk_state),
+ ssk->hst_idx, ARRAY_SIZE(ssk->hst));
+
+ for (i = 0; i < ssk->hst_idx && i < ARRAY_SIZE(ssk->hst); ++i) {
+ struct sdp_sock_hist *hst = &ssk->hst[i];
+ char *ref_str = reftype2str(hst->ref_type);
+
+ if (hst->ref_type == NOT_REF)
+ ref_str = "";
+
+ if (hst->cnt != 1) {
+ sdp_warn(sk, "[%s:%d pid: %d] %s %s : %d\n",
+ hst->func, hst->line, hst->pid,
+ ref_str, hst->str, hst->cnt);
+ } else {
+ sdp_warn(sk, "[%s:%d pid: %d] %s %s\n",
+ hst->func, hst->line, hst->pid,
+ ref_str, hst->str);
+ }
+ }
+
+ spin_unlock_irqrestore(&ssk->hst_lock, flags);
+}
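+/*
+ * Illustrative output of sdp_print_history() (hypothetical pointer,
+ * line numbers and pid; format matches the sdp_warn() calls above):
+ *
+ *	############## ffff88003680d000 TCP_ESTABLISHED 4/128 ##############
+ *	[sdp_connect:975 pid: 4711]  sdp_connect
+ *	[sdp_cma_handler:812 pid: 4711] BOTH_REF SOCK_REF_CMA : 2
+ */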
+
+static inline void _sdp_add_to_history(struct sock *sk, const char *str,
+ const char *func, int line, int ref_type, int ref_enum)
+{
+ struct sdp_sock *ssk = sdp_sk(sk);
+ unsigned i;
+ unsigned long flags;
+ struct sdp_sock_hist *hst;
+
+ spin_lock_irqsave(&ssk->hst_lock, flags);
+
+ i = ssk->hst_idx;
+
+ if (i >= ARRAY_SIZE(ssk->hst)) {
+ /* history full - drop the event but keep counting in hst_idx */
+ ++ssk->hst_idx;
+ goto out;
+ }
+
+ if (ssk->hst[i].str)
+ sdp_warn(sk, "overwriting %s\n", ssk->hst[i].str);
+
+ switch (ref_type) {
+ case NOT_REF:
+ case HOLD_REF:
+simple_add:
+ hst = &ssk->hst[i];
+ hst->str = (char *)str;
+ hst->func = (char *)func;
+ hst->line = line;
+ hst->ref_type = ref_type;
+ hst->ref_enum = ref_enum;
+ hst->cnt = 1;
+ hst->pid = current->pid;
+ ++ssk->hst_idx;
+ break;
+ case PUT_REF:
+ case __PUT_REF:
+ /* Try to shrink the history by merging a PUT into its
+ * immediately preceding HOLD */
+ hst = i > 0 ? &ssk->hst[i - 1] : NULL;
+ if (hst && hst->ref_type == HOLD_REF &&
+ hst->ref_enum == ref_enum) {
+ hst->ref_type = BOTH_REF;
+ hst->func = (char *)func;
+ hst->line = line;
+ hst->pid = current->pid;
+
+ /* shrink further: fold into a preceding identical BOTH_REF */
+ --i;
+ hst = i > 0 ? &ssk->hst[i - 1] : NULL;
+ if (hst && hst->ref_type == BOTH_REF &&
+ hst->ref_enum == ref_enum) {
+ ++hst->cnt;
+ hst->func = (char *)func;
+ hst->line = line;
+ hst->pid = current->pid;
+ ssk->hst[i].str = NULL;
+
+ --ssk->hst_idx;
+ }
+ } else
+ goto simple_add;
+ break;
+ default:
+ sdp_warn(sk, "unknown ref_type: %d\n", ref_type);
+ }
+out:
+ spin_unlock_irqrestore(&ssk->hst_lock, flags);
+}
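+/*
+ * Illustrative trace of the merging above: repeated hold/put pairs of
+ * the same ref from the same code path collapse into one summed entry
+ * instead of flooding the history:
+ *
+ *	sock_hold(sk, SOCK_REF_CMA);   adds        HOLD_REF SOCK_REF_CMA
+ *	sock_put(sk, SOCK_REF_CMA);    merges into BOTH_REF SOCK_REF_CMA : 1
+ *	(second hold/put pair)         sums into   BOTH_REF SOCK_REF_CMA : 2
+ */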
+
+#define sdp_add_to_history(sk, str) \
+ _sdp_add_to_history(sk, str, __func__, __LINE__, 0, 0)
+#else
+static inline void _sdp_add_to_history(struct sock *sk, const char *str,
+ const char *func, int line, int ref_type, int ref_enum)
+{
+}
+static inline void sdp_print_history(struct sock *sk)
+{
+}
+#define sdp_add_to_history(sk, str) do { } while (0)
+#endif /* SDP_SOCK_HISTORY */
+
static inline int _sdp_exch_state(const char *func, int line, struct sock *sk,
int from_states, int state)
{
spin_unlock_irqrestore(&sdp_sk(sk)->lock, flags);
+ sdp_add_to_history(sk, sdp_state_str(state));
+
return old;
}
#define sdp_exch_state(sk, from_states, state) \
-EINVAL : 0;
}
+ sdp_add_to_history(sk, rdma_cm_event_str(event->event));
+
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sdp_dbg(sk, "event: %s\n", rdma_cm_event_str(event->event));
if (!sdp_sk(sk)->id) {
#define SDPSTATS_ON
+#ifdef CONFIG_INFINIBAND_SDP_DEBUG
+#define SDP_SOCK_HISTORY
+#endif
+
#ifdef CONFIG_INFINIBAND_SDP_DEBUG_DATA
#define SDP_PROFILING
#endif
if (!atomic_read(&(sk)->sk_refcnt)) {\
sdp_warn(sk, "%s:%d - %s (%s) ref = 0.\n", \
__func__, __LINE__, #sock_op, msg); \
+ sdp_print_history(sk); \
SDP_WARN_ON(1); \
} else { \
sdp_dbg(sk, "%s:%d - %s (%s) ref = %d.\n", __func__, __LINE__, \
#define SDP_DUMP_PACKET(sk, str, skb, h)
#endif
-#define SOCK_REF_RESET "RESET"
-#define SOCK_REF_ALIVE "ALIVE" /* sock_alloc -> destruct_sock */
-#define SOCK_REF_CLONE "CLONE"
-#define SOCK_REF_CMA "CMA" /* sdp_cma_handler() is expected to be invoked */
-#define SOCK_REF_SEQ "SEQ" /* during proc read */
-#define SOCK_REF_DREQ_TO "DREQ_TO" /* dreq timeout is pending */
-#define SOCK_REF_ZCOPY "ZCOPY" /* zcopy send in process */
-#define SOCK_REF_RDMA_RD "RDMA_RD" /* RDMA read in process */
-#define SOCK_REF_KEEPALIVE "KEEPALIVE" /* socket is held by sk_reset_timer */
-
-#define sock_hold(sk, msg) sock_ref(sk, msg, sock_hold)
-#define sock_put(sk, msg) sock_ref(sk, msg, sock_put)
-#define __sock_put(sk, msg) sock_ref(sk, msg, __sock_put)
+enum sdp_ref {
+ SOCK_REF_RESET,
+ SOCK_REF_ALIVE, /* sock_alloc -> destruct_sock */
+ SOCK_REF_CLONE,
+ SOCK_REF_CMA, /* sdp_cma_handler is expected to be invoked */
+ SOCK_REF_SEQ, /* during proc read */
+ SOCK_REF_DREQ_TO, /* dreq timeout is pending */
+ SOCK_REF_ZCOPY, /* zcopy send in process */
+ SOCK_REF_RDMA_RD, /* RDMA read in process */
+ SOCK_REF_KEEPALIVE /* socket is held by sk_reset_timer */
+};
+
+#ifdef SDP_SOCK_HISTORY
+#define sock_hold(sk, msg) \
+ do { \
+ _sdp_add_to_history(sk, #msg, __func__, __LINE__, \
+ HOLD_REF, msg); \
+ sock_ref(sk, #msg, sock_hold); \
+ } while (0)
+
+#define sock_put(sk, msg) \
+ do { \
+ _sdp_add_to_history(sk, #msg, __func__, __LINE__, \
+ PUT_REF, msg); \
+ sock_ref(sk, #msg, sock_put); \
+ } while (0)
+
+#define __sock_put(sk, msg) \
+ do { \
+ _sdp_add_to_history(sk, #msg, __func__, __LINE__, \
+ __PUT_REF, msg); \
+ sock_ref(sk, #msg, __sock_put); \
+ } while (0)
+#else
+#define sock_hold(sk, msg) sock_ref(sk, #msg, sock_hold)
+#define sock_put(sk, msg) sock_ref(sk, #msg, sock_put)
+#define __sock_put(sk, msg) sock_ref(sk, #msg, __sock_put)
+#endif /* SDP_SOCK_HISTORY */
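+/*
+ * Usage sketch: callers pass the bare enum sdp_ref value; the macros
+ * stringize it (#msg) for the debug print and record the numeric value
+ * in the socket history, e.g.:
+ *
+ *	sock_hold(sk, SOCK_REF_DREQ_TO);
+ *	...
+ *	sock_put(sk, SOCK_REF_DREQ_TO);
+ */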
#define ENUM2STR(e) [e] = #e
.sin_addr.s_addr = inet_sk(sk)->rcv_saddr,
};
+ sdp_add_to_history(sk, __func__);
sdp_dbg(sk, "%s: %u.%u.%u.%u:%hu\n", __func__,
NIPQUAD(addr.sin_addr.s_addr), ntohs(addr.sin_port));
sdp_dbg(&ssk->isk.sk, "destroying qp\n");
sdp_prf(&ssk->isk.sk, NULL, "destroying qp");
+ sdp_add_to_history(&ssk->isk.sk, __func__);
ssk->qp_active = 0;
if (ssk->qp) {
return;
}
+ sdp_add_to_history(sk, __func__);
percpu_counter_dec(sk->sk_prot->orphan_count);
ssk->destructed_already = 1;
struct sk_buff *skb;
int data_was_unread = 0;
+ sdp_add_to_history(sk, __func__);
lock_sock(sk);
sdp_dbg(sk, "%s\n", __func__);
};
int rc;
+ sdp_add_to_history(sk, __func__);
ssk->cpu = smp_processor_id();
release_sock(sk);
flush_workqueue(sdp_wq);
struct sock *newsk;
int error;
+ sdp_add_to_history(sk, __func__);
sdp_dbg(sk, "%s state %d expected %d *err %d\n", __func__,
sk->sk_state, TCP_LISTEN, *err);
struct sdp_sock *ssk = sdp_sk(sk);
int answ;
+ sdp_add_to_history(sk, __func__);
sdp_dbg(sk, "%s\n", __func__);
switch (cmd) {
ssk->zcopy_thresh = -1; /* use global sdp_zcopy_thresh */
ssk->last_bind_err = 0;
+#ifdef SDP_SOCK_HISTORY
+ memset(ssk->hst, 0, sizeof(ssk->hst));
+ ssk->hst_idx = 0;
+ spin_lock_init(&ssk->hst_lock);
+#endif
+
return 0;
}
{
struct sdp_sock *ssk = sdp_sk(sk);
+ sdp_add_to_history(sk, __func__);
sdp_dbg(sk, "%s\n", __func__);
if (!(how & SEND_SHUTDOWN))
return;
int val;
int err = 0;
+ sdp_add_to_history(sk, __func__);
sdp_dbg(sk, "%s\n", __func__);
if (optlen < sizeof(int))
return -EINVAL;
struct sdp_sock *ssk = sdp_sk(sk);
int val, len;
+ sdp_add_to_history(sk, __func__);
sdp_dbg(sk, "%s\n", __func__);
if (level != SOL_TCP)
int rc;
sdp_dbg(sk, "%s\n", __func__);
+ sdp_add_to_history(sk, __func__);
if (!ssk->id) {
rc = sdp_get_port(sk, 0);
return -ENOMEM;
}
+ sdp_add_to_history(sk, __func__);
sk->sk_destruct = sdp_destruct;
sock->ops = &sdp_proto_ops;
sock->state = SS_UNCONNECTED;
if (ssk->ib_device == device && !ssk->id_destroyed_already) {
spin_unlock_irq(&sock_list_lock);
sk = &ssk->isk.sk;
+ sdp_add_to_history(sk, __func__);
lock_sock(sk);
/* ssk->id must be lock-protected,
* to enable mutex with sdp_close() */