#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
- struct numa_drop_counters *sk_drop_counters;
__cacheline_group_end(sock_read_rxtx);
__cacheline_group_begin(sock_write_rxtx);
#ifdef CONFIG_BPF_SYSCALL
struct bpf_local_storage __rcu *sk_bpf_storage;
#endif
+ struct numa_drop_counters *sk_drop_counters;
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
struct xarray sk_user_frags;
set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
}
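+/* Account one dropped datagram on the NUMA-aware drop counters embedded
+ * in struct udp_sock.
+ */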
+static inline void udp_drops_inc(struct sock *sk)
+{
+ numa_drop_add(&udp_sk(sk)->drop_counters, 1);
+}
+
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
#ifdef CONFIG_MEMCG
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
#endif
- CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_drop_counters);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
drop:
- sk_drops_inc(sk);
+ udp_drops_inc(sk);
busylock_release(busy);
return err;
}
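The udp_drops_inc() calls introduced in this patch feed the NUMA-aware drop counters embedded in struct udp_sock rather than a single shared per-socket field. For orientation only, a self-contained sketch of that idea follows; the *_sketch names are hypothetical and this is not the kernel's actual struct numa_drop_counters / numa_drop_add() definition, though numa_node_id(), MAX_NUMNODES and the atomic_t helpers used here are real kernel APIs.

#include <linux/atomic.h>
#include <linux/numa.h>
#include <linux/topology.h>

/* Illustrative sketch: one counter per NUMA node. Spreading drop accounting
 * across per-node counters reduces cross-node atomic contention; a real
 * implementation would likely also cacheline-align each slot.
 */
struct numa_drop_counters_sketch {
	atomic_t node_drops[MAX_NUMNODES];
};

static inline void numa_drop_add_sketch(struct numa_drop_counters_sketch *ndc,
					int val)
{
	/* Charge the drop to the NUMA node of the current CPU. */
	atomic_add(val, &ndc->node_drops[numa_node_id()]);
}

static inline int numa_drop_read_sketch(const struct numa_drop_counters_sketch *ndc)
{
	int node, total = 0;

	/* Readers sum all nodes to report a single sk_drops-style value. */
	for (node = 0; node < MAX_NUMNODES; node++)
		total += atomic_read(&ndc->node_drops[node]);
	return total;
}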
IS_UDPLITE(sk));
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
- sk_drops_inc(sk);
+ udp_drops_inc(sk);
__skb_unlink(skb, rcvq);
*total += skb->truesize;
kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
__UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
- sk_drops_inc(sk);
+ udp_drops_inc(sk);
kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
goto try_again;
}
if (unlikely(err)) {
if (!peeking) {
- sk_drops_inc(sk);
+ udp_drops_inc(sk);
UDP_INC_STATS(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- sk_drops_inc(sk);
+ udp_drops_inc(sk);
sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
nskb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!nskb)) {
- sk_drops_inc(sk);
+ udp_drops_inc(sk);
__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
__UDP_INC_STATS(net, UDP_MIB_INERRORS,
}
if (unlikely(err)) {
if (!peeking) {
- sk_drops_inc(sk);
+ udp_drops_inc(sk);
SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
}
kfree_skb(skb);
__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- sk_drops_inc(sk);
+ udp_drops_inc(sk);
sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
}
nskb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!nskb)) {
- sk_drops_inc(sk);
+ udp_drops_inc(sk);
__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
__UDP6_INC_STATS(net, UDP_MIB_INERRORS,