struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
+       /* Owning network namespace. Every hash-chain walk below now
+        * filters on net_eq(tm_net(tm), net) so entries from different
+        * netns that hash to the same bucket can never alias.
+        * NOTE(review): field is placed right after tcpm_next — confirm
+        * no cacheline/layout assumption elsewhere depends on this.
+        */
+       possible_net_t                  tcpm_net;
        struct inetpeer_addr            tcpm_saddr;
        struct inetpeer_addr            tcpm_daddr;
        unsigned long                   tcpm_stamp;
        struct rcu_head                 rcu_head;
 };
 
+/* Return the network namespace that owns this cached metrics entry.
+ * Read-side accessor for tcpm_net (written once at entry creation via
+ * write_pnet()); used by lookup/dump/delete loops for netns filtering.
+ */
+static inline struct net *tm_net(struct tcp_metrics_block *tm)
+{
+       return read_pnet(&tm->tcpm_net);
+}
+
 static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
 {
                if (!tm)
                        goto out_unlock;
        }
+       write_pnet(&tm->tcpm_net, net);
        tm->tcpm_saddr = *saddr;
        tm->tcpm_daddr = *daddr;
 
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, saddr) &&
-                   addr_same(&tm->tcpm_daddr, daddr))
+                   addr_same(&tm->tcpm_daddr, daddr) &&
+                   net_eq(tm_net(tm), net))
                        break;
                depth++;
        }
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
-                   addr_same(&tm->tcpm_daddr, &daddr))
+                   addr_same(&tm->tcpm_daddr, &daddr) &&
+                   net_eq(tm_net(tm), net))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
-                   addr_same(&tm->tcpm_daddr, &daddr))
+                   addr_same(&tm->tcpm_daddr, &daddr) &&
+                   net_eq(tm_net(tm), net))
                        break;
        }
        return tm;
                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
+                       if (!net_eq(tm_net(tm), net))
+                               continue;
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
-                   (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
+                   (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+                   net_eq(tm_net(tm), net)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked_genl(*pp); tm; tm = deref_locked_genl(*pp)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
-                   (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
+                   (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+                   net_eq(tm_net(tm), net)) {
                        *pp = tm->tcpm_next;
                        kfree_rcu(tm, rcu_head);
                        found = true;