struct timer_list timer;
        struct rcu_head rcu;
        unsigned long delete_at;
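+       /* Peer's namespace and its hash mix, cached when the peer node
+        * turns out to be a container on the same host
+        */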
+       struct net *peer_net;
+       u32 peer_hash_mix;
 };
 
 /* Node FSM states and events:
        return n->links[bearer_id].link;
 }
 
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
 {
        struct tipc_node *n;
        int bearer_id;
        if (unlikely(!n))
                return mtu;
 
+       /* Allow MAX_MSG_SIZE when building connection-oriented messages
+        * if both peers are in the same core network
+        */
+       if (n->peer_net && connected) {
+               tipc_node_put(n);
+               return mtu;
+       }
+
        bearer_id = n->active_links[sel & 1];
        if (likely(bearer_id != INVALID_BEARER_ID))
                mtu = n->links[bearer_id].mtu;
        }
 }
 
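+/* tipc_node_assign_peer_net - look up the peer's local network namespace
+ * Walk all namespaces on this host and match the peer by TIPC net id,
+ * 128-bit node id and namespace hash mix. A match means the peer runs in
+ * a local container, so cache its namespace for the direct xmit path.
+ */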
+static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
+{
+       int net_id = tipc_netid(n->net);
+       struct tipc_net *tn_peer;
+       struct net *tmp;
+       u32 hash_chk;
+
+       if (n->peer_net)
+               return;
+
+       for_each_net_rcu(tmp) {
+               tn_peer = tipc_net(tmp);
+               if (!tn_peer)
+                       continue;
+               /* Verify that the peer node really lives in this namespace */
+               if (tn_peer->net_id != net_id)
+                       continue;
+               if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
+                       continue;
+               hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
+               if (hash_mixes ^ hash_chk)
+                       continue;
+               n->peer_net = tmp;
+               n->peer_hash_mix = hash_mixes;
+               break;
+       }
+}
+
 static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
-                                         u8 *peer_id, u16 capabilities)
+                                         u8 *peer_id, u16 capabilities,
+                                         u32 signature, u32 hash_mixes)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *n, *temp_node;
        spin_lock_bh(&tn->node_list_lock);
        n = tipc_node_find(net, addr);
        if (n) {
+               /* Hash mix changed: the peer may now be resolvable locally */
+               if (n->peer_hash_mix ^ hash_mixes)
+                       tipc_node_assign_peer_net(n, hash_mixes);
                if (n->capabilities == capabilities)
                        goto exit;
                /* Same node may come back with new capabilities */
                list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                        tn->capabilities &= temp_node->capabilities;
                }
+
                goto exit;
        }
        n = kzalloc(sizeof(*n), GFP_ATOMIC);
        n->addr = addr;
        memcpy(&n->peer_id, peer_id, 16);
        n->net = net;
+       n->peer_net = NULL;
+       n->peer_hash_mix = 0;
+       /* Bind to the peer's kernel-local namespace, if one exists */
+       tipc_node_assign_peer_net(n, hash_mixes);
        n->capabilities = capabilities;
        kref_init(&n->kref);
        rwlock_init(&n->lock);
                                 tipc_bc_sndlink(net),
                                 &n->bc_entry.link)) {
                pr_warn("Broadcast rcv link creation failed, no memory\n");
+               /* creation failed: undo the namespace binding */
+               if (n->peer_net) {
+                       n->peer_net = NULL;
+                       n->peer_hash_mix = 0;
+               }
                kfree(n);
                n = NULL;
                goto exit;
 
 void tipc_node_check_dest(struct net *net, u32 addr,
                          u8 *peer_id, struct tipc_bearer *b,
-                         u16 capabilities, u32 signature,
+                         u16 capabilities, u32 signature, u32 hash_mixes,
                          struct tipc_media_addr *maddr,
                          bool *respond, bool *dupl_addr)
 {
        *dupl_addr = false;
        *respond = false;
 
-       n = tipc_node_create(net, addr, peer_id, capabilities);
+       n = tipc_node_create(net, addr, peer_id, capabilities, signature,
+                            hash_mixes);
        if (!n)
                return;
 
        /* Notify publications from this node */
        n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
 
+       /* Contact lost: invalidate the local namespace shortcut */
+       if (n->peer_net) {
+               n->peer_net = NULL;
+               n->peer_hash_mix = 0;
+       }
        /* Notify sockets connected to node */
        list_for_each_entry_safe(conn, safe, conns, list) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
        return -EMSGSIZE;
 }
 
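+/* tipc_lxc_xmit - deliver messages directly to a peer in a local container
+ * Bypasses the bearer and injects the buffers straight into the socket
+ * receive path of the peer namespace. Messages that are not consumed are
+ * left on @list so the caller can fall back to regular link transmit.
+ */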
+static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
+{
+       struct tipc_msg *hdr = buf_msg(skb_peek(list));
+       struct sk_buff_head inputq;
+
+       switch (msg_user(hdr)) {
+       case TIPC_LOW_IMPORTANCE:
+       case TIPC_MEDIUM_IMPORTANCE:
+       case TIPC_HIGH_IMPORTANCE:
+       case TIPC_CRITICAL_IMPORTANCE:
+               if (msg_connected(hdr) || msg_named(hdr)) {
+                       tipc_loopback_trace(peer_net, list);
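+                       /* list lock may be uninitialized; tipc_sk_rcv() needs it */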
+                       spin_lock_init(&list->lock);
+                       tipc_sk_rcv(peer_net, list);
+                       return;
+               }
+               if (msg_mcast(hdr)) {
+                       tipc_loopback_trace(peer_net, list);
+                       skb_queue_head_init(&inputq);
+                       tipc_sk_mcast_rcv(peer_net, list, &inputq);
+                       __skb_queue_purge(list);
+                       skb_queue_purge(&inputq);
+                       return;
+               }
+               return;
+       case MSG_FRAGMENTER:
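+               /* reassemble the fragment chain, then deliver as multicast */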
+               if (tipc_msg_assemble(list)) {
+                       tipc_loopback_trace(peer_net, list);
+                       skb_queue_head_init(&inputq);
+                       tipc_sk_mcast_rcv(peer_net, list, &inputq);
+                       __skb_queue_purge(list);
+                       skb_queue_purge(&inputq);
+               }
+               return;
+       case GROUP_PROTOCOL:
+       case CONN_MANAGER:
+               tipc_loopback_trace(peer_net, list);
+               spin_lock_init(&list->lock);
+               tipc_sk_rcv(peer_net, list);
+               return;
+       case LINK_PROTOCOL:
+       case NAME_DISTRIBUTOR:
+       case TUNNEL_PROTOCOL:
+       case BCAST_PROTOCOL:
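+               /* leave untouched so the caller falls back to the bearer path */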
+               return;
+       default:
+               return;
+       }
+}
+
 /**
  * tipc_node_xmit() is the general link level function for message sending
  * @net: the applicable net namespace
        struct tipc_link_entry *le = NULL;
        struct tipc_node *n;
        struct sk_buff_head xmitq;
+       bool node_up = false;
        int bearer_id;
        int rc;
 
        }
 
        tipc_node_read_lock(n);
+       node_up = node_is_up(n);
+       if (node_up && n->peer_net && check_net(n->peer_net)) {
+               /* Deliver directly into the peer's local container; any
+                * skbs it cannot consume stay on the list for the bearer
+                * path below.
+                */
+               tipc_lxc_xmit(n->peer_net, list);
+               if (likely(skb_queue_empty(list))) {
+                       tipc_node_read_unlock(n);
+                       tipc_node_put(n);
+                       return 0;
+               }
+       }
+
        bearer_id = n->active_links[selector & 1];
        if (unlikely(bearer_id == INVALID_BEARER_ID)) {
                tipc_node_read_unlock(n);
 
        return i;
 }
+
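+/* tipc_node_pre_cleanup_net - purge references to a dying namespace
+ * Called before @exit_net is deleted: clear the cached peer_net pointer
+ * on nodes in all other namespaces so the direct xmit path can no longer
+ * reach into the disappearing namespace.
+ */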
+void tipc_node_pre_cleanup_net(struct net *exit_net)
+{
+       struct tipc_node *n;
+       struct tipc_net *tn;
+       struct net *tmp;
+
+       rcu_read_lock();
+       for_each_net_rcu(tmp) {
+               if (tmp == exit_net)
+                       continue;
+               tn = tipc_net(tmp);
+               if (!tn)
+                       continue;
+               spin_lock_bh(&tn->node_list_lock);
+               list_for_each_entry_rcu(n, &tn->node_list, list) {
+                       if (!n->peer_net)
+                               continue;
+                       if (n->peer_net != exit_net)
+                               continue;
+                       tipc_node_write_lock(n);
+                       n->peer_net = NULL;
+                       n->peer_hash_mix = 0;
+                       tipc_node_write_unlock_fast(n);
+                       break;
+               }
+               spin_unlock_bh(&tn->node_list_lock);
+       }
+       rcu_read_unlock();
+}
 
 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
 void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
                          struct tipc_bearer *bearer,
-                         u16 capabilities, u32 signature,
+                         u16 capabilities, u32 signature, u32 hash_mixes,
                          struct tipc_media_addr *maddr,
                          bool *respond, bool *dupl_addr);
 void tipc_node_delete_links(struct net *net, int bearer_id);
 void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected);
 bool tipc_node_is_up(struct net *net, u32 addr);
 u16 tipc_node_get_capabilities(struct net *net, u32 addr);
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
                                   struct netlink_callback *cb);
+void tipc_node_pre_cleanup_net(struct net *exit_net);
 #endif
 
 
        /* Build message as chain of buffers */
        __skb_queue_head_init(&pkts);
-       mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
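+       /* connectionless send: the same-host MTU shortcut does not apply */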
+       mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
        rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
        if (unlikely(rc != dlen))
                return rc;
                return rc;
 
        __skb_queue_head_init(&pkts);
-       mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+       mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
        rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
        if (unlikely(rc != dlen))
                return rc;
        sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
        tipc_set_sk_state(sk, TIPC_ESTABLISHED);
        tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
-       tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
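+       /* connection is established: same-host peers may use MAX_MSG_SIZE */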
+       tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
        tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
        __skb_queue_purge(&sk->sk_write_queue);
        if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)