commit 174e23810cd31
("sk_buff: drop all skb extensions on free and skb scrubbing") made napi
recycle always drop skb extensions.  The additional skb_ext_del() that is
performed via nf_reset on napi skb recycle is not needed anymore.
Most nf_reset() calls in the stack are there so queued skb won't block
'rmmod nf_conntrack' indefinitely.
This removes the skb_ext_del from nf_reset, and renames it to a more
fitting nf_reset_ct().
In a few selected places, add a call to skb_ext_reset to make sure that
no active extensions remain.
I am submitting this for "net", because we're still early in the release
cycle.  The patch applies to net-next too, but I think the rename causes
needless divergence between those trees.
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        skb->ip_summed = CHECKSUM_NONE;
        ip_select_ident(net, skb, NULL);
        po = lookup_chan(htons(header->call_id), iph->saddr);
        if (po) {
                skb_dst_drop(skb);
-               nf_reset(skb);
+               nf_reset_ct(skb);
                return sk_receive_skb(sk_pppox(po), skb, 0);
        }
 drop:
 
         */
        skb_orphan(skb);
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (ptr_ring_produce(&tfile->tx_ring, skb))
                goto drop;
 
        /* Don't wait up for transmitted skbs to be freed. */
        if (!use_napi) {
                skb_orphan(skb);
-               nf_reset(skb);
+               nf_reset_ct(skb);
        }
 
        /* If running out of space, stop queue to avoid getting packets that we
 
        struct neighbour *neigh;
        int ret;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;
 
        /* reset skb device */
        if (likely(err == 1))
-               nf_reset(skb);
+               nf_reset_ct(skb);
        else
                skb = NULL;
 
        bool is_v6gw = false;
        int ret = -EINVAL;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
 
        /* reset skb device */
        if (likely(err == 1))
-               nf_reset(skb);
+               nf_reset_ct(skb);
        else
                skb = NULL;
 
 
        skb_orphan(skb);
        skb_dst_drop(skb);
        skb->mark = 0;
-       secpath_reset(skb);
-       nf_reset(skb);
+       skb_ext_reset(skb);
+       nf_reset_ct(skb);
 
        /*
         * Get absolute mactime here so all HWs RX at the "same time", and
 
         */
        dst_release(skb_dst(skb));
        skb_dst_set(skb, NULL);
-#ifdef CONFIG_XFRM
-       secpath_reset(skb);
-#endif
-       nf_reset(skb);
+       skb_ext_reset(skb);
+       nf_reset_ct(skb);
 
 #ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
 
 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
 #endif /* CONFIG_SKB_EXTENSIONS */
 
-static inline void nf_reset(struct sk_buff *skb)
+static inline void nf_reset_ct(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put(skb_nfct(skb));
        skb->_nfct = 0;
 #endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-       skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
-#endif
 }
 
 static inline void nf_reset_trace(struct sk_buff *skb)
 
        /* clean the netfilter state now that the batman-adv header has been
         * removed
         */
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
                goto dropped;
 
        skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb_ext_reset(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_reset_trace(skb);
 
 #ifdef CONFIG_NET_SWITCHDEV
 
 
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
 
 
                                kfree_skb(skb);
                                return;
                        }
-                       nf_reset(skb);
+                       nf_reset_ct(skb);
                }
                ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
                                      skb);
 
        ip_send_check(iph);
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-       nf_reset(skb);
+       nf_reset_ct(skb);
 }
 
 static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
 
                        mroute_sk = rcu_dereference(mrt->mroute_sk);
                        if (mroute_sk) {
-                               nf_reset(skb);
+                               nf_reset_ct(skb);
                                raw_rcv(mroute_sk, skb);
                                return 0;
                        }
 
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
        /* Avoid counting cloned packets towards the original connection. */
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
        /*
 
                kfree_skb(skb);
                return NET_RX_DROP;
        }
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        skb_push(skb, skb->data - skb_network_header(skb));
 
 
        if (tcp_v4_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (tcp_filter(sk, skb))
                goto discard_and_relse;
 
         */
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto drop;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
                int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto drop;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        /* No socket. Drop packet silently, if checksum is wrong */
        if (udp_lib_checksum_complete(skb))
 
                        /* Free reference early: we don't need it any more,
                           and it may hold ip_conntrack module loaded
                           indefinitely. */
-                       nf_reset(skb);
+                       nf_reset_ct(skb);
 
                        skb_postpull_rcsum(skb, skb_network_header(skb),
                                           skb_network_header_len(skb));
 
                return;
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
        if (hooknum == NF_INET_PRE_ROUTING ||
 
 
                        /* Not releasing hash table! */
                        if (clone) {
-                               nf_reset(clone);
+                               nf_reset_ct(clone);
                                rawv6_rcv(sk, clone);
                        }
                }
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
                              IPSKB_REROUTED);
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
 
        skb->ip_summed = CHECKSUM_NONE;
 
        skb_dst_drop(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        rcu_read_lock();
        dev = rcu_dereference(spriv->dev);
 
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        return sk_receive_skb(sk, skb, 1);
 
 
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        return sk_receive_skb(sk, skb, 1);
 
 
        if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
                ret = ip_vs_confirm_conntrack(skb);
        if (ret == NF_ACCEPT) {
-               nf_reset(skb);
+               nf_reset_ct(skb);
                skb_forward_csum(skb);
        }
        return ret;
 
        }
 
        skb_dst_drop(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
        secpath_reset(skb);
 
        skb->pkt_type = PACKET_HOST;
 
        skb_dst_drop(skb);
 
        /* drop conntrack reference */
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        spkt = &PACKET_SKB_CB(skb)->sa.pkt;
 
        skb_dst_drop(skb);
 
        /* drop conntrack reference */
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        spin_lock(&sk->sk_receive_queue.lock);
        po->stats.stats1.tp_packets++;
 
 
        if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
                goto discard_release;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (sk_filter(sk, skb))
                goto discard_release;
 
        if (err)
                goto drop;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (decaps) {
                sp = skb_sec_path(skb);
 
        skb->skb_iif = 0;
        skb->ignore_df = 0;
        skb_dst_drop(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_reset_trace(skb);
 
        if (!xnet)
 
        struct net *net = xs_net(skb_dst(skb)->xfrm);
 
        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
-               nf_reset(skb);
+               nf_reset_ct(skb);
 
                err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
                if (unlikely(err != 1))
 
                        continue;
                }
 
-               nf_reset(skb);
+               nf_reset_ct(skb);
                skb_dst_drop(skb);
                skb_dst_set(skb, dst);