if (data_was_unread ||
(sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
/* Unread data was tossed, zap the connection. */
- NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+ NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
sdp_exch_state(sk, TCPF_CLOSE_WAIT | TCPF_ESTABLISHED,
TCP_TIME_WAIT);
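The hunk above follows the mainline change that made the SNMP/MIB helpers namespace-aware: NET_INC_STATS_USER() now takes the struct net the socket belongs to, obtained with sock_net(sk). A minimal call-site sketch, assuming the kernel version this patch targets; the wrapper name is hypothetical:

	/* Hypothetical helper: bump the per-namespace TCPAbortOnClose counter.
	 * sock_net(sk) returns the struct net the socket was created in, and the
	 * LINUX_MIB_* counter is incremented in that namespace's statistics. */
	static inline void sdp_count_abort_on_close(struct sock *sk)
	{
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
	}
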
if (offset < skb->len)
goto found_ok_skb;
- BUG_TRAP(flags & MSG_PEEK);
+ WARN_ON(!(flags & MSG_PEEK));
skb = skb->next;
} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
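BUG_TRAP() was removed from the kernel, and the replacement has to invert the condition: BUG_TRAP(cond) warned when cond was false, whereas WARN_ON(cond) warns when cond is true. A sketch of the equivalence, assuming the old macro's rough definition:

	/* The removed macro was roughly:
	 *   #define BUG_TRAP(x)  do { if (unlikely(!(x))) printk(KERN_ERR ...); } while (0)
	 * so the drop-in replacement negates the condition: */
	WARN_ON(!(flags & MSG_PEEK));	/* warns only when MSG_PEEK is NOT set */
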
return mask;
}
-static void sdp_enter_memory_pressure(void)
+static void sdp_enter_memory_pressure(struct sock *sk)
{
- sdp_dbg(NULL, "%s\n", __func__);
+ sdp_dbg(sk, "%s\n", __func__);
}
void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb)
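The new parameter mirrors the updated struct proto callback: .enter_memory_pressure now receives the socket under pressure, which lets sdp_dbg() tag the message with that socket instead of NULL. A hedged sketch of the wiring; the initializer below is illustrative and omits the other proto members:

	/* Illustrative only: how the socket-aware callback is hooked up. */
	static struct proto sdp_proto_sketch = {
		.name                  = "SDP",
		.enter_memory_pressure = sdp_enter_memory_pressure,	/* now void (*)(struct sock *) */
	};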