Update rcu_dereference() primitives to use new lockdep-based
checking. The rcu_dereference() in __in6_dev_get() may be
protected either by rcu_read_lock() or RTNL, per Eric Dumazet.
The rcu_dereference() in __sk_free() is protected by the fact
that __sk_free() is never reached while an update could still
change ->sk_filter.  Check for this by using
rcu_dereference_check() to verify that the struct sock's
->sk_wmem_alloc counter is zero.
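
For reference, rcu_dereference_check(p, c) acts like
rcu_dereference(p), but when lockdep-based RCU checking is enabled
it also complains unless the lockdep expression c holds, e.g.:

	/* Legal under either the RCU read lock or RTNL: */
	idev = rcu_dereference_check(dev->ip6_ptr,
				     rcu_read_lock_held() ||
				     lockdep_rtnl_is_held());

rcu_dereference_bh() is the analogous primitive for read sides
protected by rcu_read_lock_bh().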
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-5-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 
 extern void rtnl_unlock(void);
 extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
+#ifdef CONFIG_PROVE_LOCKING
+extern int lockdep_rtnl_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
 
 extern void rtnetlink_init(void);
 extern void __rtnl_unlock(void);
 
 static inline struct inet6_dev *
 __in6_dev_get(struct net_device *dev)
 {
-       return rcu_dereference(dev->ip6_ptr);
+       return rcu_dereference_check(dev->ip6_ptr,
+                                    rcu_read_lock_held() ||
+                                    lockdep_rtnl_is_held());
 }
 
 static inline struct inet6_dev *
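
Either read-side context is enough to satisfy the check in
__in6_dev_get() above; hypothetical callers for illustration:

	rcu_read_lock();
	idev = __in6_dev_get(dev);	/* legal: RCU read side */
	rcu_read_unlock();

	rtnl_lock();
	idev = __in6_dev_get(dev);	/* legal: RTNL held */
	rtnl_unlock();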
 
        rcu_read_lock_bh();
 
        txq = dev_pick_tx(dev, skb);
-       q = rcu_dereference(txq->qdisc);
+       q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
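
All the rcu_dereference_bh() conversions in this patch share the
shape sketched below ("head" and "p" are generic placeholders):

	rcu_read_lock_bh();
	p = rcu_dereference_bh(head->ptr);	/* pairs with the _bh read side */
	/* ... use p ... */
	rcu_read_unlock_bh();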
 
                return err;
 
        rcu_read_lock_bh();
-       filter = rcu_dereference(sk->sk_filter);
+       filter = rcu_dereference_bh(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = sk_run_filter(skb, filter->insns,
                                filter->len);
        }
 
        rcu_read_lock_bh();
-       old_fp = rcu_dereference(sk->sk_filter);
+       old_fp = rcu_dereference_bh(sk->sk_filter);
        rcu_assign_pointer(sk->sk_filter, fp);
        rcu_read_unlock_bh();
 
        struct sk_filter *filter;
 
        rcu_read_lock_bh();
-       filter = rcu_dereference(sk->sk_filter);
+       filter = rcu_dereference_bh(sk->sk_filter);
        if (filter) {
                rcu_assign_pointer(sk->sk_filter, NULL);
                sk_filter_delayed_uncharge(sk, filter);
 
 }
 EXPORT_SYMBOL(rtnl_is_locked);
 
+#ifdef CONFIG_PROVE_LOCKING
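+/*
+ * Report to RCU's lockdep checking whether the RTNL mutex is held.
+ * rtnl_mutex itself is static to this file, so call sites such as
+ * __in6_dev_get() use this wrapper in their rcu_dereference_check()
+ * conditions.
+ */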
+int lockdep_rtnl_is_held(void)
+{
+       return lockdep_is_held(&rtnl_mutex);
+}
+EXPORT_SYMBOL(lockdep_rtnl_is_held);
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
 static struct rtnl_link *rtnl_msg_handlers[NPROTO];
 
 static inline int rtm_msgindex(int msgtype)
 
        if (sk->sk_destruct)
                sk->sk_destruct(sk);
 
-       filter = rcu_dereference(sk->sk_filter);
+       filter = rcu_dereference_check(sk->sk_filter,
+                                      atomic_read(&sk->sk_wmem_alloc) == 0);
        if (filter) {
                sk_filter_uncharge(sk, filter);
                rcu_assign_pointer(sk->sk_filter, NULL);
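
The ->sk_wmem_alloc check above encodes __sk_free()'s lifetime
argument: __sk_free() runs only after the last write reference is
gone, at which point no updater can still reach the socket.
Roughly, from sk_free():

	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);	/* no concurrent updates possible now */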
 
 
        if (!(flags & MSG_TRYHARD)) {
                rcu_read_lock_bh();
-               for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
-                       rt = rcu_dereference(rt->u.dst.dn_next)) {
+               for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
+                       rt = rcu_dereference_bh(rt->u.dst.dn_next)) {
                        if ((flp->fld_dst == rt->fl.fld_dst) &&
                            (flp->fld_src == rt->fl.fld_src) &&
                            (flp->mark == rt->fl.mark) &&
                if (h > s_h)
                        s_idx = 0;
                rcu_read_lock_bh();
-               for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
+               for (rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
                        rt;
-                       rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
+                       rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) {
                        if (idx < s_idx)
                                continue;
                        skb_dst_set(skb, dst_clone(&rt->u.dst));
 
        for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
                rcu_read_lock_bh();
-               rt = dn_rt_hash_table[s->bucket].chain;
+               rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
                if (rt)
                        break;
                rcu_read_unlock_bh();
        }
-       return rcu_dereference(rt);
+       return rt;
 }
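
dn_rt_cache_get_first() above changes shape rather than just
primitives: the chain pointer is now fetched by rcu_dereference_bh()
while rcu_read_lock_bh() is held, so the value returned at the end
has already been dereferenced.  In outline ("head" is a placeholder):

	rcu_read_lock_bh();
	rt = rcu_dereference_bh(head->chain);	/* fetched under the lock */
	...
	return rt;				/* no further annotation needed */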
 
 static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
                rcu_read_lock_bh();
                rt = dn_rt_hash_table[s->bucket].chain;
        }
-       return rcu_dereference(rt);
+       return rcu_dereference_bh(rt);
 }
 
 static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 
                if (!rt_hash_table[st->bucket].chain)
                        continue;
                rcu_read_lock_bh();
-               r = rcu_dereference(rt_hash_table[st->bucket].chain);
+               r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
                while (r) {
                        if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
                            r->rt_genid == st->genid)
                                return r;
-                       r = rcu_dereference(r->u.dst.rt_next);
+                       r = rcu_dereference_bh(r->u.dst.rt_next);
                }
                rcu_read_unlock_bh();
        }
                rcu_read_lock_bh();
                r = rt_hash_table[st->bucket].chain;
        }
-       return rcu_dereference(r);
+       return rcu_dereference_bh(r);
 }
 
 static struct rtable *rt_cache_get_next(struct seq_file *seq,
        hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
 
        rcu_read_lock_bh();
-       for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-               rth = rcu_dereference(rth->u.dst.rt_next)) {
+       for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
+               rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
                if (rth->fl.fl4_dst == flp->fl4_dst &&
                    rth->fl.fl4_src == flp->fl4_src &&
                    rth->fl.iif == 0 &&
                if (!rt_hash_table[h].chain)
                        continue;
                rcu_read_lock_bh();
-               for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
-                    rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
+               for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
+                    rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
                        if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
                                continue;
                        if (rt_is_expired(rt))
 
        struct sk_filter *filter;
 
        rcu_read_lock_bh();
-       filter = rcu_dereference(sk->sk_filter);
+       filter = rcu_dereference_bh(sk->sk_filter);
        if (filter != NULL)
                res = sk_run_filter(skb, filter->insns, filter->len);
        rcu_read_unlock_bh();