netfilter: move tee_active to core
author     Florian Westphal <fw@strlen.de>
           Tue, 14 Jul 2015 15:51:07 +0000 (17:51 +0200)
committer  Chuck Anderson <chuck.anderson@oracle.com>
           Sun, 28 May 2017 02:44:17 +0000 (19:44 -0700)
This prepares for a TEE-like expression in nftables.
We want to ensure that only one duplicate is sent, so both xt_TEE and the
nft expression will use the same per-cpu variable to detect duplication.

The other use case is detecting recursive calls into xtables, but since
we don't want a dependency from nft on the xtables core, the variable is
placed in net/netfilter/core.c instead of the x_tables core.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
(cherry picked from commit e7c8899f3e6f2830136cf6e115c4a55ce7a3920a)

Orabug: 24694570

Signed-off-by: Ethan Zhao <ethan.zhao@oracle.com>
Reviewed-by: Dhaval Giani <dhaval.giani@oracle.com>
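
The guard pattern the patch relies on is small enough to sketch in full: the
sender sets a per-cpu flag around re-injection of the cloned skb, and every
duplication rule bails out when it sees the flag already set. The sketch below
is illustrative only; nf_dup_example() is a made-up name, and its use of
pskb_copy() and the one-argument ip_local_out() simply mirrors the xt_TEE IPv4
path shown in the diff further down.

/* Illustrative sketch only -- nf_dup_example() is not part of this patch.
 * It shows the guard pattern that xt_TEE (and, later, an nft dup-style
 * expression) can share once nf_skb_duplicated lives in the netfilter core.
 */
#include <linux/netfilter.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <net/ip.h>

static void nf_dup_example(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	/* The re-injected copy traverses OUTPUT/POSTROUTING again; if the
	 * flag is already set we are looking at our own duplicate, so do
	 * not duplicate it a second time.
	 */
	if (__this_cpu_read(nf_skb_duplicated))
		return;

	nskb = pskb_copy(skb, GFP_ATOMIC);
	if (!nskb)
		return;

	/* Mark this CPU while the copy is sent, so any TEE/dup rule hit
	 * during the nested hook traversal becomes a no-op.
	 */
	__this_cpu_write(nf_skb_duplicated, true);
	ip_local_out(nskb);	/* one-argument form, as in this tree */
	__this_cpu_write(nf_skb_duplicated, false);
}

Because the flag is per-cpu and is only set with bottom halves disabled inside
a hook traversal, no locking is needed: the nested traversal happens on the
same CPU before the flag is cleared again.
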
include/linux/netfilter.h
net/netfilter/core.c
net/netfilter/xt_TEE.c

index 63560d0a8dfe2802ec826d87921a8848fe56e68b..356aabe50d55c63af2c7cc3ab0a8fce3be153077 100644 (file)
@@ -376,4 +376,15 @@ extern struct nfq_ct_hook __rcu *nfq_ct_hook;
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
 #endif
 
+/**
+ * nf_skb_duplicated - TEE target has sent a packet
+ *
+ * When a xtables target sends a packet, the OUTPUT and POSTROUTING
+ * hooks are traversed again, i.e. nft and xtables are invoked recursively.
+ *
+ * This is used by xtables TEE target to prevent the duplicated skb from
+ * being duplicated again.
+ */
+DECLARE_PER_CPU(bool, nf_skb_duplicated);
+
 #endif /*__LINUX_NETFILTER_H*/
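
The kernel-doc comment above also mentions the second use case from the commit
message: telling a legitimate re-entry (an skb that xt_TEE just re-injected)
apart from unexpected recursion into xtables. A hypothetical debug check, not
part of this patch, could read the same flag at hook entry:

/* Hypothetical helper, for illustration only. recursion_depth is an
 * assumed counter maintained by the caller; only the flag itself comes
 * from this patch.
 */
#include <linux/bug.h>
#include <linux/netfilter.h>
#include <linux/percpu.h>

static inline void nf_check_reentry(unsigned int recursion_depth)
{
	/* A nested traversal is only expected while a duplicate is being
	 * sent on this CPU; anything else points at a bug in a target.
	 */
	WARN_ON_ONCE(recursion_depth && !__this_cpu_read(nf_skb_duplicated));
}
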
index 5d0c6fd59475a811b33734fe2fe32e250eaa0dc2..03da357dd74d00c585362103d46482fcc210340b 100644 (file)
@@ -34,6 +34,9 @@ EXPORT_SYMBOL(nf_afinfo);
 const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ipv6_ops);
 
+DEFINE_PER_CPU(bool, nf_skb_duplicated);
+EXPORT_SYMBOL_GPL(nf_skb_duplicated);
+
 int nf_register_afinfo(const struct nf_afinfo *afinfo)
 {
        mutex_lock(&afinfo_mutex);
index a747eb475b68e174db6a0f3ebe41de8ec5ca7d61..8950e79c4dc935b8f265d87537677cd4df5f0a14 100644 (file)
@@ -37,7 +37,6 @@ struct xt_tee_priv {
 };
 
 static const union nf_inet_addr tee_zero_address;
-static DEFINE_PER_CPU(bool, tee_active);
 
 static struct net *pick_net(struct sk_buff *skb)
 {
@@ -88,7 +87,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        const struct xt_tee_tginfo *info = par->targinfo;
        struct iphdr *iph;
 
-       if (__this_cpu_read(tee_active))
+       if (__this_cpu_read(nf_skb_duplicated))
                return XT_CONTINUE;
        /*
         * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
@@ -125,9 +124,9 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        ip_send_check(iph);
 
        if (tee_tg_route4(skb, info)) {
-               __this_cpu_write(tee_active, true);
+               __this_cpu_write(nf_skb_duplicated, true);
                ip_local_out(skb);
-               __this_cpu_write(tee_active, false);
+               __this_cpu_write(nf_skb_duplicated, false);
        } else {
                kfree_skb(skb);
        }
@@ -170,7 +169,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_tee_tginfo *info = par->targinfo;
 
-       if (__this_cpu_read(tee_active))
+       if (__this_cpu_read(nf_skb_duplicated))
                return XT_CONTINUE;
        skb = pskb_copy(skb, GFP_ATOMIC);
        if (skb == NULL)
@@ -188,9 +187,9 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
                --iph->hop_limit;
        }
        if (tee_tg_route6(skb, info)) {
-               __this_cpu_write(tee_active, true);
+               __this_cpu_write(nf_skb_duplicated, true);
                ip6_local_out(skb);
-               __this_cpu_write(tee_active, false);
+               __this_cpu_write(nf_skb_duplicated, false);
        } else {
                kfree_skb(skb);
        }