#define DTRACE_SMB(name, ...) \
DTRACE_PROBE(__smb_##name, ## __VA_ARGS__);
+/*
+ * These definitions are used at probe points to specify the traffic direction;
+ * this helps simplify argument translation.
+ */
+#define DTRACE_NET_PROBE_OUTBOUND 0x0
+#define DTRACE_NET_PROBE_INBOUND 0x1
+
#define DTRACE_IP(name, ...) \
DTRACE_PROBE(__ip_##name, ## __VA_ARGS__);
+/*
+ * Default DTRACE_TCP() and DTRACE_UDP() provider definitions specify the
+ * probe point within an is-enabled predicate. This is to avoid the overhead
+ * incurred during argument dereferencing (e.g. calls to ip_hdr(skb)), along
+ * with any conditional evaluation (which would require branching) when the
+ * probe is disabled.
+ *
+ * Because some TCP probe points require additional argument preparation,
+ * we also define the is-enabled predicate directly as
+ * DTRACE_TCP_ENABLED(probename) along with a probe point which does not
+ * wrap the probe in an is-enabled predicate; this allows us to handle cases such
+ * as this:
+ *
+ * if (DTRACE_TCP_ENABLED(state__change)) {
+ * ...argument preparation...
+ * DTRACE_TCP_NOCHECK(state__change, ...);
+ * }
+ */
+
#define DTRACE_TCP(name, ...) \
+	do {								\
+		/* do-while(0) keeps the is-enabled check a single	\
+		 * statement, so the macro is safe in unbraced		\
+		 * if/else bodies (no dangling-else hazard).		\
+		 */							\
+		if (DTRACE_PROBE_ENABLED(__tcp_##name))			\
+			DTRACE_PROBE(__tcp_##name, ## __VA_ARGS__);	\
+	} while (0)
+#define DTRACE_TCP_ENABLED(name) \
+	DTRACE_PROBE_ENABLED(__tcp_##name)
+#define DTRACE_TCP_NOCHECK(name, ...) \
	DTRACE_PROBE(__tcp_##name, ## __VA_ARGS__);
#define DTRACE_UDP(name, ...) \
- DTRACE_PROBE(__udp_##name, ## __VA_ARGS__);
+ if (DTRACE_PROBE_ENABLED(__udp_##name)) \
+ DTRACE_PROBE(__udp_##name, ## __VA_ARGS__);
#define DTRACE_SYSEVENT(name, ...) \
DTRACE_PROBE(__sysevent_##name, ## __VA_ARGS__);
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>
+#include <linux/sdt.h>
#include <net/icmp.h>
#include <net/inet_common.h>
}
EXPORT_SYMBOL(tcp_recvmsg);
+/* We wish to avoid instrumenting TCP state transitions to SYN_SENT as we trace
+ * those state changes later once the destination address is committed to the
+ * sk. We also need to deal with the fact that separate timewait sockets are
+ * used to handle the TIME_WAIT state. We do not want to trace direct
+ * transitions from CLOSING/FIN_WAIT2 -> CLOSE since they do not represent
+ * connection close, rather a transition to using the timewait socket.
+ * Accordingly skip instrumentation of transitions from CLOSING/FIN_WAIT2 to
+ * CLOSE.
+ */
+/* True only for the state transitions we wish to instrument (see the block
+ * comment above for why SYN_SENT and CLOSING/FIN_WAIT2 -> CLOSE are
+ * excluded).  Arguments are parenthesized since callers may pass
+ * expressions; note each argument is evaluated more than once, so do not
+ * pass expressions with side effects.
+ */
+#define REAL_STATE_CHANGE(old, new) \
+	((old) != (new) && (new) != TCP_SYN_SENT && \
+	 (((old) != TCP_CLOSING && (old) != TCP_FIN_WAIT2) || \
+	  (new) != TCP_CLOSE))
+
void tcp_set_state(struct sock *sk, int state)
{
int oldstate = sk->sk_state;
*/
sk->sk_state = state;
+ if (DTRACE_TCP_ENABLED(state__change) &&
+ REAL_STATE_CHANGE(oldstate, state))
+ DTRACE_TCP_NOCHECK(state__change,
+ struct sk_buff * : pktinfo_t *, NULL,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, NULL,
+ struct tcp_sock * : tcpsinfo_t *, tcp_sk(sk),
+ struct tcphdr * : tcpinfo_t *, NULL,
+ int : tcplsinfo_t *, oldstate,
+ int : int, state,
+ int : int, DTRACE_NET_PROBE_OUTBOUND);
+
#ifdef STATE_TRACE
SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <linux/errqueue.h>
+#include <linux/sdt.h>
int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
+ DTRACE_TCP(connect__established,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, ip_hdr(skb),
+ struct tcp_sock * : tcpsinfo_t *, tp,
+ struct tcphdr * : tcpinfo_t *, tcp_hdr(skb),
+ int : tcplsinfo_t *, TCP_ESTABLISHED,
+ int, TCP_ESTABLISHED, int, DTRACE_NET_PROBE_INBOUND);
tcp_set_state(sk, TCP_ESTABLISHED);
if (skb) {
*/
if (th->rst) {
+ DTRACE_TCP(connect__refused,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *,
+ ip_hdr(skb),
+ struct tcp_sock * : tcpsinfo_t *, tp,
+ struct tcphdr * : tcpinfo_t *, th,
+ int : tcplsinfo_t *,
+ sk ? sk->sk_state : TCP_CLOSE,
+ int, sk ? sk->sk_state : TCP_CLOSE,
+ int, DTRACE_NET_PROBE_INBOUND);
tcp_reset(sk);
goto discard;
}
tcp_init_buffer_space(sk);
}
smp_mb();
+
+ DTRACE_TCP(accept__established,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, ip_hdr(skb),
+ struct tcp_sock * : tcpsinfo_t *, tp,
+ struct tcphdr * : tcpinfo_t *, tcp_hdr(skb),
+ int : tcplsinfo_t *, TCP_ESTABLISHED,
+ int, TCP_ESTABLISHED,
+ int, DTRACE_NET_PROBE_INBOUND);
tcp_set_state(sk, TCP_ESTABLISHED);
sk->sk_state_change(sk);
tcp_try_fastopen(sk, skb, req, &foc, dst);
err = af_ops->send_synack(sk, dst, &fl, req,
skb_get_queue_mapping(skb), &foc);
+
+ /* Do not pass in tcp sock as ports/addresses are not yet
+ * populated - instead translators will fill them in from
+ * skb data.
+ */
+ DTRACE_TCP(state__change,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, ip_hdr(skb),
+ struct tcp_sock * : tcpsinfo_t *, NULL,
+ struct tcphdr * : tcpinfo_t *, tcp_hdr(skb),
+ int : tcplsinfo_t *, TCP_LISTEN,
+ int, TCP_SYN_RECV, int, DTRACE_NET_PROBE_INBOUND);
+
if (!fastopen) {
if (err || want_cookie)
goto drop_and_free;
#include <linux/crypto.h>
#include <linux/scatterlist.h>
+#include <linux/sdt.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
}
EXPORT_SYMBOL(tcp_v4_send_check);
+/* Since we want to trace send events in TCP prior to pushing the segment to
+ * IP - where the IP header is added - we need to construct an argument
+ * containing relevant IP info so that TCP probe consumers can utilize it.
+ *
+ * @saddr: source address, network byte order
+ * @daddr: destination address, network byte order
+ * @iph: caller-provided (typically on-stack) header to populate
+ */
+static inline void dtrace_tcp_build_iphdr(__be32 saddr, __be32 daddr,
+					  struct iphdr *iph)
+{
+	/* Callers pass an uninitialized stack struct; zero it first so probe
+	 * consumers never see leftover kernel stack data in the fields we do
+	 * not explicitly set below (tos, id, frag_off, ttl, check, ...).
+	 */
+	memset(iph, 0, sizeof(*iph));
+	iph->version = 4;
+	iph->ihl = 5;
+	/* NOTE(review): tot_len = 5 looks like a copy-and-paste of ihl; the
+	 * IPv6 counterpart sets payload_len = 0.  Confirm whether any
+	 * translator consumes tot_len before changing this value.
+	 */
+	iph->tot_len = 5;
+	iph->protocol = IPPROTO_TCP;
+	iph->saddr = saddr;
+	iph->daddr = daddr;
+}
+
/*
* This routine will send an RST to the other tcp.
*
arg.bound_dev_if = sk->sk_bound_dev_if;
arg.tos = ip_hdr(skb)->tos;
+
+ if (DTRACE_TCP_ENABLED(send) ||
+ DTRACE_TCP_ENABLED(accept__refused)) {
+ struct iphdr iph;
+
+ dtrace_tcp_build_iphdr(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr,
+ &iph);
+
+ DTRACE_TCP_NOCHECK(send,
+ struct sk_buff * : pktinfo_t *, NULL,
+ struct sock * : csinfo_t *, NULL,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, &iph,
+ struct tcp_sock * : tcpsinfo_t *, NULL,
+ struct tcphdr * : tcpinfo_t *, &rep.th,
+ int : tcplsinfo_t *, TCP_CLOSE,
+ int : int, TCP_CLOSE,
+ int : int, DTRACE_NET_PROBE_OUTBOUND);
+ if (th->syn && rep.th.seq == 0)
+ DTRACE_TCP_NOCHECK(accept__refused,
+ struct sk_buff * : pktinfo_t *, NULL,
+ struct sock * : csinfo_t *, NULL,
+ __dtrace_tcp_void_ip_t * :
+ ipinfo_t *, &iph,
+ struct tcp_sock * : tcpsinfo_t *,
+ NULL,
+ struct tcphdr * : tcpinfo_t *,
+ &rep.th,
+ int : tcplsinfo_t *, TCP_CLOSE,
+ int : int, TCP_CLOSE,
+ int : int,
+ DTRACE_NET_PROBE_OUTBOUND);
+ }
+
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
+
+ if (DTRACE_TCP_ENABLED(send)) {
+ struct iphdr iph;
+
+ dtrace_tcp_build_iphdr(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr,
+ &iph);
+
+ DTRACE_TCP_NOCHECK(send,
+ struct sk_buff * : pktinfo_t *, NULL,
+ struct sock * : csinfo_t *, NULL,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, &iph,
+ struct tcp_sock * : tcpsinfo_t *, NULL,
+ struct tcphdr * : tcpinfo_t *, &rep,
+ int : tcplsinfo_t *, TCP_CLOSE,
+ int : int, TCP_CLOSE,
+ int : int, DTRACE_NET_PROBE_OUTBOUND);
+ }
+
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
skb_set_queue_mapping(skb, queue_mapping);
+
+ if (DTRACE_TCP_ENABLED(send)) {
+ struct iphdr iph;
+
+ dtrace_tcp_build_iphdr(ireq->ir_loc_addr,
+ ireq->ir_rmt_addr, &iph);
+
+ /* Do not supply tcp sk - addresses/ports are not
+ * committed yet - instead translators will fill them
+ * in from skb/IP info.
+ */
+ DTRACE_TCP_NOCHECK(send,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * :
+ ipinfo_t *, &iph,
+ struct tcp_sock * : tcpsinfo_t *,
+ NULL,
+ struct tcphdr * : tcpinfo_t *,
+ tcp_hdr(skb),
+ int : tcplsinfo_t *, TCP_LISTEN,
+ int, TCP_LISTEN,
+ int, DTRACE_NET_PROBE_OUTBOUND);
+ }
+
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
ireq->opt);
{
const struct iphdr *iph;
const struct tcphdr *th;
- struct sock *sk;
+ struct sock *sk = NULL;
int ret;
struct net *net = dev_net(skb->dev);
if (!sk)
goto no_tcp_socket;
+ DTRACE_TCP(receive,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, ip_hdr(skb),
+ struct tcp_sock * : tcpsinfo_t *, tcp_sk(sk),
+ struct tcphdr * : tcpinfo_t *, tcp_hdr(skb),
+ int : tcplsinfo_t *, sk ? sk->sk_state : TCP_CLOSE,
+ int, sk ? sk->sk_state : TCP_CLOSE,
+ int, DTRACE_NET_PROBE_INBOUND);
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
discard_it:
/* Discard frame. */
+ if (DTRACE_TCP_ENABLED(receive) && skb->pkt_type == PACKET_HOST)
+ DTRACE_TCP_NOCHECK(receive,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *,
+ ip_hdr(skb),
+ struct tcp_sock * : tcpsinfo_t *, tcp_sk(sk),
+ struct tcphdr * : tcpinfo_t *, tcp_hdr(skb),
+ int : tcplsinfo_t *,
+ sk ? sk->sk_state : TCP_CLOSE,
+ int, sk ? sk->sk_state : TCP_CLOSE,
+ int, DTRACE_NET_PROBE_INBOUND);
kfree_skb(skb);
return 0;
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
+#include <linux/sdt.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
/* Linkage updates. */
__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
inet_twsk_put(tw);
+
+ if (DTRACE_TCP_ENABLED(state__change) &&
+ state != sk->sk_state)
+ DTRACE_TCP_NOCHECK(state__change,
+ struct sk_buff * : pktinfo_t *, NULL,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * :
+ ipinfo_t *, NULL,
+ struct tcp_sock * : tcpsinfo_t *,
+ tcp_sk(sk),
+ struct tcphdr * : tcpinfo_t *, NULL,
+ int : tcplsinfo_t *, sk->sk_state,
+ int, state,
+ int, DTRACE_NET_PROBE_OUTBOUND);
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
+#include <linux/sdt.h>
/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;
if (skb->len != tcp_header_size)
tcp_event_data_sent(tp, sk);
+ DTRACE_TCP(send,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, ip_hdr(skb),
+ struct tcp_sock * : tcpsinfo_t *, tp,
+ struct tcphdr * : tcpinfo_t *, tcp_hdr(skb),
+ int : tcplsinfo_t *, sk->sk_state, int, sk->sk_state,
+ int, DTRACE_NET_PROBE_OUTBOUND);
+ if (DTRACE_TCP_ENABLED(connect__request) && th->syn &&
+ th->ack_seq == 0)
+ DTRACE_TCP_NOCHECK(connect__request,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *,
+ ip_hdr(skb),
+ struct tcp_sock * : tcpsinfo_t *, tp,
+ struct tcphdr * : tcpinfo_t *, th,
+ int : tcplsinfo_t *, sk->sk_state,
+ int, sk->sk_state,
+ int, DTRACE_NET_PROBE_OUTBOUND);
+
if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
tcp_skb_pcount(skb));
tp->retrans_stamp = tcp_time_stamp;
tcp_connect_queue_skb(sk, buff);
tcp_ecn_send_syn(sk, buff);
+ DTRACE_TCP(state__change, struct sk_buff * : pktinfo_t *, NULL,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, ip_hdr(buff),
+ struct tcp_sock * : tcpsinfo_t *, tp,
+ struct tcphdr * : tcpinfo_t *, tcp_hdr(buff),
+ int : tcplsinfo_t *, TCP_CLOSE,
+ int, sk->sk_state, int, DTRACE_NET_PROBE_OUTBOUND);
/* Send off SYN; include data in Fast Open. */
err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
+#include <linux/sdt.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
uh->check = CSUM_MANGLED_0;
send:
+ DTRACE_UDP(send,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ void_ip_t * : ipinfo_t *, ip_hdr(skb),
+ struct udp_sock * : udpsinfo_t *, udp_sk(sk),
+ struct udphdr * : udpinfo_t *, uh);
+
err = ip_send_skb(sock_net(sk), skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
goto out_free;
}
- if (!peeked)
+ if (!peeked) {
+ DTRACE_UDP(receive,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ void_ip_t * : ipinfo_t *, ip_hdr(skb),
+ struct udp_sock * : udpsinfo_t *, udp_sk(sk),
+ struct udphdr * : udpinfo_t *, udp_hdr(skb));
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
+ }
sock_recv_ts_and_drops(msg, sk, skb);
ret = encap_rcv(sk, skb);
if (ret <= 0) {
+ DTRACE_UDP(receive,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ void_ip_t * : ipinfo_t *,
+ ip_hdr(skb),
+ struct udp_sock * : udpsinfo_t *,
+ udp_sk(sk),
+ struct udphdr * : udpinfo_t *,
+ udp_hdr(skb));
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
#include <linux/crypto.h>
#include <linux/scatterlist.h>
+#include <linux/sdt.h>
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
sock_put(sk);
}
+/* Since we want to trace send events in TCP prior to pushing the segment to
+ * IP - where the IP header is added - we need to construct an argument
+ * containing relevant IP info so that TCP probe consumers can utilize it.
+ *
+ * @saddr: source address
+ * @daddr: destination address
+ * @ip6h: caller-provided (typically on-stack) header to populate
+ */
+static inline void dtrace_tcp_build_ipv6hdr(struct in6_addr *saddr,
+					    struct in6_addr *daddr,
+					    struct ipv6hdr *ip6h)
+{
+	/* Callers pass an uninitialized stack struct; zero it first so probe
+	 * consumers never see leftover kernel stack data in the fields we do
+	 * not explicitly set below (priority, flow_lbl, hop_limit).
+	 */
+	memset(ip6h, 0, sizeof(*ip6h));
+	ip6h->version = 6;
+	ip6h->payload_len = 0;
+	ip6h->nexthdr = IPPROTO_TCP;
+	ip6h->saddr = *saddr;
+	ip6h->daddr = *daddr;
+}
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
struct flowi *fl,
skb_set_queue_mapping(skb, queue_mapping);
rcu_read_lock();
+
+ if (DTRACE_TCP_ENABLED(send)) {
+ struct ipv6hdr ip6h;
+
+ dtrace_tcp_build_ipv6hdr(&ireq->ir_v6_loc_addr,
+ &ireq->ir_v6_rmt_addr, &ip6h);
+
+ /* Do not supply tcp sk - addresses/ports are not
+ * committed yet - instead translators will fill them
+ * in from IP/TCP data.
+ */
+ DTRACE_TCP_NOCHECK(send,
+ struct sk_buff * : pktinfo_t *,
+ NULL,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * :
+ ipinfo_t *, &ip6h,
+ struct tcp_sock * : tcpsinfo_t *,
+ NULL,
+ struct tcphdr * : tcpinfo_t *,
+ tcp_hdr(skb),
+ int : tcplsinfo_t *, TCP_LISTEN,
+ int, TCP_LISTEN,
+ int, DTRACE_NET_PROBE_OUTBOUND);
+ }
+
err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
np->tclass);
rcu_read_unlock();
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
+ if (DTRACE_TCP_ENABLED(send) ||
+ DTRACE_TCP_ENABLED(accept__refused)) {
+ struct ipv6hdr ip6h;
+
+ dtrace_tcp_build_ipv6hdr(&fl6.saddr, &fl6.daddr,
+ &ip6h);
+
+ /* Do not supply tcp sk - addresses/ports are not
+ * committed yet - instead translators will fill them
+ * in from IP/TCP data.
+ */
+ DTRACE_TCP_NOCHECK(send,
+ struct sk_buff * : pktinfo_t *,
+ NULL,
+ struct sock * : csinfo_t *, NULL,
+ __dtrace_tcp_void_ip_t * :
+ ipinfo_t *, &ip6h,
+ struct tcp_sock * : tcpsinfo_t *,
+ NULL,
+ struct tcphdr * : tcpinfo_t *, t1,
+ int : tcplsinfo_t *, TCP_CLOSE,
+ int, TCP_CLOSE,
+ int, DTRACE_NET_PROBE_OUTBOUND);
+ if (rst && th->syn && th->ack == 0)
+ DTRACE_TCP_NOCHECK(accept__refused,
+ struct sk_buff * :
+ pktinfo_t *, NULL,
+ struct sock * : csinfo_t *,
+ NULL,
+ __dtrace_tcp_void_ip_t * :
+ ipinfo_t *, &ip6h,
+ struct tcp_sock * :
+ tcpsinfo_t *, NULL,
+ struct tcphdr * :
+ tcpinfo_t *, t1,
+ int : tcplsinfo_t *,
+ TCP_CLOSE,
+ int, TCP_CLOSE,
+ int,
+ DTRACE_NET_PROBE_OUTBOUND);
+ }
+
ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
if (rst)
{
const struct tcphdr *th;
const struct ipv6hdr *hdr;
- struct sock *sk;
+ struct sock *sk = NULL;
int ret;
struct net *net = dev_net(skb->dev);
if (!sk)
goto no_tcp_socket;
+ DTRACE_TCP(receive,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *, hdr,
+ struct tcp_sock * : tcpsinfo_t *, tcp_sk(sk),
+ struct tcphdr * : tcpinfo_t *, th,
+ int : tcplsinfo_t *, sk ? sk->sk_state : TCP_CLOSE,
+ int, sk ? sk->sk_state : TCP_CLOSE,
+ int, DTRACE_NET_PROBE_INBOUND);
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
}
discard_it:
+ if (DTRACE_TCP_ENABLED(receive) && skb->pkt_type == PACKET_HOST)
+ DTRACE_TCP_NOCHECK(receive,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ __dtrace_tcp_void_ip_t * : ipinfo_t *,
+ ipv6_hdr(skb),
+ struct tcp_sock * : tcpsinfo_t *, tcp_sk(sk),
+ struct tcphdr * : tcpinfo_t *, tcp_hdr(skb),
+ int : tcplsinfo_t *,
+ sk ? sk->sk_state : TCP_CLOSE,
+ int, sk ? sk->sk_state : TCP_CLOSE,
+ int, DTRACE_NET_PROBE_INBOUND);
kfree_skb(skb);
return 0;
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/sdt.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
goto out_free;
}
if (!peeked) {
+ DTRACE_UDP(receive,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ void_ip_t * : ipinfo_t *, ip_hdr(skb),
+ struct udp_sock * : udpsinfo_t *, udp_sk(sk),
+ struct udphdr * : udpinfo_t *, udp_hdr(skb));
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
ret = encap_rcv(sk, skb);
if (ret <= 0) {
+ DTRACE_UDP(receive,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ void_ip_t * : ipinfo_t *,
+ ip_hdr(skb),
+ struct udp_sock * : udpsinfo_t *,
+ udp_sk(sk),
+ struct udphdr * : udpinfo_t *,
+ udp_hdr(skb));
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
uh->check = CSUM_MANGLED_0;
send:
+ DTRACE_UDP(send,
+ struct sk_buff * : pktinfo_t *, skb,
+ struct sock * : csinfo_t *, sk,
+ void_ip_t * : ipinfo_t *, ip_hdr(skb),
+ struct udp_sock * : udpsinfo_t *, udp_sk(sk),
+ struct udphdr * : udpinfo_t *, uh);
+
err = ip6_send_skb(skb);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {