#include <rdma/ib_verbs.h>
#define sdp_printk(level, sk, format, arg...) \
-	printk(level "sdp_sock(%d:%d): " format, \
+	printk(level "%s:%d sdp_sock(%d:%d): " format, \
+	       __func__, __LINE__, \
	(sk) ? inet_sk(sk)->num : -1, \
	(sk) ? ntohs(inet_sk(sk)->dport) : -1, ## arg)
#define sdp_warn(sk, format, arg...) \
	do { \
		if (sdp_debug_level > 0) \
			sdp_printk(KERN_DEBUG, sk, format , ## arg); \
	} while (0)
+
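+/*
+ * sock_ref() sanity-checks a refcount operation: if sk_refcnt is
+ * already zero it warns (and leaves the count alone), otherwise it
+ * logs the call site, the tag and the current refcount, then performs
+ * the requested sock_hold()/sock_put()/__sock_put().
+ */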
+#define sock_ref(sk, msg, sock_op) ({ \
+	if (!atomic_read(&(sk)->sk_refcnt)) { \
+		sdp_warn(sk, "%s:%d - %s (%s) ref = 0.\n", \
+			 __func__, __LINE__, #sock_op, msg); \
+		WARN_ON(1); \
+	} else { \
+		sdp_dbg(sk, "%s:%d - %s (%s) ref = %d.\n", \
+			__func__, __LINE__, #sock_op, msg, \
+			atomic_read(&(sk)->sk_refcnt)); \
+		sock_op(sk); \
+	} \
+})
+
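+/*
+ * Shadow sk_common_release() so that the sock_put() it performs
+ * internally (dropping the SOCK_REF_BORN reference) shows up in the
+ * log; macros do not expand recursively, so the inner call still
+ * reaches the real sk_common_release().
+ */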
+#define sk_common_release(sk) do { \
+	sdp_dbg(sk, "%s:%d - sock_put(" SOCK_REF_BORN ") - refcount = %d " \
+		"from within sk_common_release\n", \
+		__func__, __LINE__, atomic_read(&(sk)->sk_refcnt)); \
+	sk_common_release(sk); \
+} while (0)
+
#else /* CONFIG_INFINIBAND_SDP_DEBUG */
#define sdp_dbg(priv, format, arg...) \
do { (void) (priv); } while (0)
+#define sock_ref(sk, msg, sock_op) sock_op(sk)
#endif /* CONFIG_INFINIBAND_SDP_DEBUG */
#ifndef CONFIG_INFINIBAND_SDP_DEBUG_DATA
#define sdp_dbg_data(priv, format, arg...) \
	do { (void) (priv); } while (0)
#endif /* CONFIG_INFINIBAND_SDP_DEBUG_DATA */
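+/*
+ * Tags for the tagged sock_hold()/sock_put() below: each hold site and
+ * its matching put use the same tag, so a leaked reference can be
+ * traced back to its origin in the debug log.
+ */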
+#define SOCK_REF_RESET "RESET"
+#define SOCK_REF_BORN "BORN" /* sock_alloc -> destruct_sock */
+#define SOCK_REF_CLONE "CLONE"
+#define SOCK_REF_CM_TW "CM_TW" /* TIMEWAIT_ENTER -> TIMEWAIT_EXIT */
+#define SOCK_REF_SEQ "SEQ" /* during proc read */
+
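+/*
+ * Shadow the core sock_hold()/sock_put()/__sock_put(): every SDP call
+ * site now passes a SOCK_REF_* tag, and with debugging enabled each
+ * refcount change goes through sock_ref() and is logged.
+ */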
+#define sock_hold(sk, msg) sock_ref(sk, msg, sock_hold)
+#define sock_put(sk, msg) sock_ref(sk, msg, sock_put)
+#define __sock_put(sk, msg) sock_ref(sk, msg, __sock_put)
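+/*
+ * Usage sketch: pair each hold with a put under the same tag, e.g.
+ * for the TIME_WAIT reference:
+ *
+ *	sock_hold(sk, SOCK_REF_CM_TW);
+ *	...
+ *	sock_put(sk, SOCK_REF_CM_TW);
+ */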
+
#define SDP_RESOLVE_TIMEOUT 1000
#define SDP_ROUTE_TIMEOUT 1000
#define SDP_RETRY_COUNT 5
spin_lock_irqsave(&sdp_sk(sk)->lock, flags);
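+	/* func and line identify the real caller; presumably they are
+	 * supplied by a wrapper macro (not shown here) that passes
+	 * __func__ and __LINE__ down to sdp_exch_state() */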
- sdp_dbg(sk, "%s:%d - set state: %s -> %s 0x%x\n", __func__, __LINE__,
+ sdp_dbg(sk, "%s:%d - set state: %s -> %s 0x%x\n", func, line,
sdp_state_str(sk->sk_state), sdp_state_str(state), from_states);
if ((1 << sk->sk_state) & ~from_states) {
out:
bh_unlock_sock(sk);
- sock_put(sk);
+ sock_put(sk, SOCK_REF_BORN);
}
static void sdp_init_timer(struct sock *sk)
sk->sk_state_change(sk);
/* Don't destroy socket before destroy work does its job */
- sock_hold(sk);
+ sock_hold(sk, SOCK_REF_RESET);
queue_work(sdp_workqueue, &ssk->destroy_work);
read_unlock(&device_removal_lock);
goto adjudge_to_death;
}
- sock_hold(sk);
+ sock_hold(sk, SOCK_REF_CM_TW);
/* We need to flush the recv. buffs. We do this only on the
* descriptor close, not protocol-sourced closes, because the
sdp_cancel_dreq_wait_timeout(ssk);
if (sk->sk_state == TCP_TIME_WAIT)
- sock_put(sk);
+ sock_put(sk, SOCK_REF_CM_TW);
/* In a normal close the current state is TCP_TIME_WAIT or TCP_CLOSE,
 * but if the CM connection is dropped out from under us the socket
 * could be in any state */
sdp_exch_state(sk, ~0, TCP_CLOSE);
- sock_put(sk);
+ sock_put(sk, SOCK_REF_RESET);
}
void sdp_dreq_wait_timeout_work(struct work_struct *work)
if (!sdp_close_state(sk))
return;
- sock_hold(sk);
+ sock_hold(sk, SOCK_REF_CM_TW);
/*
* Just turn off CORK here.
spin_lock_irq(&sock_list_lock);
start = sdp_get_idx(seq, *pos - 1);
if (start)
- sock_hold((struct sock *)start);
+ sock_hold((struct sock *)start, SOCK_REF_SEQ);
spin_unlock_irq(&sock_list_lock);
return start;
else
next = sdp_get_idx(seq, *pos);
if (next)
- sock_hold((struct sock *)next);
+ sock_hold((struct sock *)next, SOCK_REF_SEQ);
spin_unlock_irq(&sock_list_lock);
*pos += 1;
seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
- sock_put(sk);
+ sock_put(sk, SOCK_REF_SEQ);
out:
return 0;
}