* POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "core.h"
-#include "link.h"
 #include "socket.h"
 #include "msg.h"
 #include "bcast.h"
 #include "name_distr.h"
+#include "core.h"
 
 #define        MAX_PKT_DEFAULT_MCAST   1500    /* bcast link max packet size (fixed) */
 #define        BCLINK_WIN_DEFAULT      20      /* bcast link window size (default) */
-#define        BCBEARER                MAX_BEARERS
-
-/**
- * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
- * @primary: pointer to primary bearer
- * @secondary: pointer to secondary bearer
- *
- * Bearers must have same priority and same set of reachable destinations
- * to be paired.
- */
-
-struct tipc_bcbearer_pair {
-       struct tipc_bearer *primary;
-       struct tipc_bearer *secondary;
-};
-
-/**
- * struct tipc_bcbearer - bearer used by broadcast link
- * @bearer: (non-standard) broadcast bearer structure
- * @media: (non-standard) broadcast media structure
- * @bpairs: array of bearer pairs
- * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
- * @remains: temporary node map used by tipc_bcbearer_send()
- * @remains_new: temporary node map used tipc_bcbearer_send()
- *
- * Note: The fields labelled "temporary" are incorporated into the bearer
- * to avoid consuming potentially limited stack space through the use of
- * large local variables within multicast routines.  Concurrent access is
- * prevented through use of the spinlock "bclink_lock".
- */
-struct tipc_bcbearer {
-       struct tipc_bearer bearer;
-       struct tipc_media media;
-       struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
-       struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
-       struct tipc_node_map remains;
-       struct tipc_node_map remains_new;
-};
-
-/**
- * struct tipc_bclink - link used for broadcast messages
- * @lock: spinlock governing access to structure
- * @link: (non-standard) broadcast link structure
- * @node: (non-standard) node structure representing b'cast link's peer node
- * @flags: represent bclink states
- * @bcast_nodes: map of broadcast-capable nodes
- * @retransmit_to: node that most recently requested a retransmit
- *
- * Handles sequence numbering, fragmentation, bundling, etc.
- */
-struct tipc_bclink {
-       spinlock_t lock;
-       struct tipc_link link;
-       struct tipc_node node;
-       unsigned int flags;
-       struct tipc_node_map bcast_nodes;
-       struct tipc_node *retransmit_to;
-};
-
-static struct tipc_bcbearer *bcbearer;
-static struct tipc_bclink *bclink;
-static struct tipc_link *bcl;
 
 const char tipc_bclink_name[] = "broadcast-link";
 
 static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
 static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
 
-static void tipc_bclink_lock(void)
+static void tipc_bclink_lock(struct net *net)
 {
-       spin_lock_bh(&bclink->lock);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       spin_lock_bh(&tn->bclink->lock);
 }
 
-static void tipc_bclink_unlock(void)
+static void tipc_bclink_unlock(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node = NULL;
 
-       if (likely(!bclink->flags)) {
-               spin_unlock_bh(&bclink->lock);
+       if (likely(!tn->bclink->flags)) {
+               spin_unlock_bh(&tn->bclink->lock);
                return;
        }
 
-       if (bclink->flags & TIPC_BCLINK_RESET) {
-               bclink->flags &= ~TIPC_BCLINK_RESET;
-               node = tipc_bclink_retransmit_to();
+       if (tn->bclink->flags & TIPC_BCLINK_RESET) {
+               tn->bclink->flags &= ~TIPC_BCLINK_RESET;
+               node = tipc_bclink_retransmit_to(net);
        }
-       spin_unlock_bh(&bclink->lock);
+       spin_unlock_bh(&tn->bclink->lock);
 
        if (node)
                tipc_link_reset_all(node);
        return MAX_PKT_DEFAULT_MCAST;
 }
 
-void tipc_bclink_set_flags(unsigned int flags)
+void tipc_bclink_set_flags(struct net *net, unsigned int flags)
 {
-       bclink->flags |= flags;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tn->bclink->flags |= flags;
 }
 
 static u32 bcbuf_acks(struct sk_buff *buf)
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
 }
 
-void tipc_bclink_add_node(u32 addr)
+void tipc_bclink_add_node(struct net *net, u32 addr)
 {
-       tipc_bclink_lock();
-       tipc_nmap_add(&bclink->bcast_nodes, addr);
-       tipc_bclink_unlock();
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tipc_bclink_lock(net);
+       tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
+       tipc_bclink_unlock(net);
 }
 
-void tipc_bclink_remove_node(u32 addr)
+void tipc_bclink_remove_node(struct net *net, u32 addr)
 {
-       tipc_bclink_lock();
-       tipc_nmap_remove(&bclink->bcast_nodes, addr);
-       tipc_bclink_unlock();
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tipc_bclink_lock(net);
+       tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
+       tipc_bclink_unlock(net);
 }
 
-static void bclink_set_last_sent(void)
+static void bclink_set_last_sent(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+
        if (bcl->next_out)
                bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
        else
                bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
 }
 
-u32 tipc_bclink_get_last_sent(void)
+u32 tipc_bclink_get_last_sent(struct net *net)
 {
-       return bcl->fsm_msg_cnt;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return tn->bcl->fsm_msg_cnt;
 }
 
 static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
  *
  * Called with bclink_lock locked
  */
-struct tipc_node *tipc_bclink_retransmit_to(void)
+struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
 {
-       return bclink->retransmit_to;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return tn->bclink->retransmit_to;
 }
 
 /**
  *
  * Called with bclink_lock locked
  */
-static void bclink_retransmit_pkt(u32 after, u32 to)
+static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
 {
        struct sk_buff *skb;
+       struct tipc_link *bcl = tn->bcl;
 
        skb_queue_walk(&bcl->outqueue, skb) {
                if (more(buf_seqno(skb), after)) {
  */
 void tipc_bclink_wakeup_users(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *skb;
 
-       while ((skb = skb_dequeue(&bclink->link.waiting_sks)))
+       while ((skb = skb_dequeue(&tn->bclink->link.waiting_sks)))
                tipc_sk_rcv(net, skb);
 }
 
        struct sk_buff *skb, *tmp;
        struct sk_buff *next;
        unsigned int released = 0;
+       struct net *net = n_ptr->net;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
        /* Bail out if tx queue is empty (no clean up is required) */
-       skb = skb_peek(&bcl->outqueue);
+       skb = skb_peek(&tn->bcl->outqueue);
        if (!skb)
                goto exit;
 
                 * acknowledge sent messages only (if other nodes still exist)
                 * or both sent and unsent messages (otherwise)
                 */
-               if (bclink->bcast_nodes.count)
-                       acked = bcl->fsm_msg_cnt;
+               if (tn->bclink->bcast_nodes.count)
+                       acked = tn->bcl->fsm_msg_cnt;
                else
-                       acked = bcl->next_out_no;
+                       acked = tn->bcl->next_out_no;
        } else {
                /*
                 * Bail out if specified sequence number does not correspond
                 * to a message that has been sent and not yet acknowledged
                 */
                if (less(acked, buf_seqno(skb)) ||
-                   less(bcl->fsm_msg_cnt, acked) ||
+                   less(tn->bcl->fsm_msg_cnt, acked) ||
                    less_eq(acked, n_ptr->bclink.acked))
                        goto exit;
        }
 
        /* Skip over packets that node has previously acknowledged */
-       skb_queue_walk(&bcl->outqueue, skb) {
+       skb_queue_walk(&tn->bcl->outqueue, skb) {
                if (more(buf_seqno(skb), n_ptr->bclink.acked))
                        break;
        }
 
        /* Update packets that node is now acknowledging */
-       skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
+       skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
 
-               next = tipc_skb_queue_next(&bcl->outqueue, skb);
-               if (skb != bcl->next_out) {
+               next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
+               if (skb != tn->bcl->next_out) {
                        bcbuf_decr_acks(skb);
                } else {
                        bcbuf_set_acks(skb, 0);
-                       bcl->next_out = next;
-                       bclink_set_last_sent();
+                       tn->bcl->next_out = next;
+                       bclink_set_last_sent(net);
                }
 
                if (bcbuf_acks(skb) == 0) {
-                       __skb_unlink(skb, &bcl->outqueue);
+                       __skb_unlink(skb, &tn->bcl->outqueue);
                        kfree_skb(skb);
                        released = 1;
                }
        n_ptr->bclink.acked = acked;
 
        /* Try resolving broadcast link congestion, if necessary */
-       if (unlikely(bcl->next_out)) {
-               tipc_link_push_packets(bcl);
-               bclink_set_last_sent();
+       if (unlikely(tn->bcl->next_out)) {
+               tipc_link_push_packets(tn->bcl);
+               bclink_set_last_sent(net);
        }
-       if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
+       if (unlikely(released && !skb_queue_empty(&tn->bcl->waiting_sks)))
                n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
 
 exit:
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
 }
 
 /**
                msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_to(msg, to);
 
-               tipc_bclink_lock();
+               tipc_bclink_lock(net);
                tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
-               bcl->stats.sent_nacks++;
-               tipc_bclink_unlock();
+               tn->bcl->stats.sent_nacks++;
+               tipc_bclink_unlock(net);
                kfree_skb(buf);
 
                n_ptr->bclink.oos_state++;
  */
 int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+       struct tipc_bclink *bclink = tn->bclink;
        int rc = 0;
        int bc = 0;
        struct sk_buff *skb;
 
        /* Broadcast to all other nodes */
        if (likely(bclink)) {
-               tipc_bclink_lock();
+               tipc_bclink_lock(net);
                if (likely(bclink->bcast_nodes.count)) {
                        rc = __tipc_link_xmit(net, bcl, list);
                        if (likely(!rc)) {
                                u32 len = skb_queue_len(&bcl->outqueue);
 
-                               bclink_set_last_sent();
+                               bclink_set_last_sent(net);
                                bcl->stats.queue_sz_counts++;
                                bcl->stats.accu_queue_sz += len;
                        }
                        bc = 1;
                }
-               tipc_bclink_unlock();
+               tipc_bclink_unlock(net);
        }
 
        if (unlikely(!bc))
  */
 static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 {
+       struct tipc_net *tn = net_generic(node->net, tipc_net_id);
+
        bclink_update_last_sent(node, seqno);
        node->bclink.last_in = seqno;
        node->bclink.oos_state = 0;
-       bcl->stats.recv_info++;
+       tn->bcl->stats.recv_info++;
 
        /*
         * Unicast an ACK periodically, ensuring that
        if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
                tipc_link_proto_xmit(node->active_links[node->addr & 1],
                                     STATE_MSG, 0, 0, 0, 0, 0);
-               bcl->stats.sent_acks++;
+               tn->bcl->stats.sent_acks++;
        }
 }
 
 void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node;
        u32 next_in;
                if (msg_destnode(msg) == tipc_own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bcl->stats.recv_nacks++;
-                       bclink->retransmit_to = node;
-                       bclink_retransmit_pkt(msg_bcgap_after(msg),
+                       tn->bclink->retransmit_to = node;
+                       bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                } else {
                        tipc_node_unlock(node);
                        bclink_peek_nack(net, msg);
 receive:
                /* Deliver message to destination */
                if (likely(msg_isdata(msg))) {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
                                tipc_sk_mcast_rcv(net, buf);
                        else
                                kfree_skb(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        tipc_link_bundle_rcv(net, buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        tipc_buf_append(&node->bclink.reasm_buf, &buf);
                        if (unlikely(!buf && !node->bclink.reasm_buf))
                                goto unlock;
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_fragments++;
                        if (buf) {
                                bcl->stats.recv_fragmented++;
                                msg = buf_msg(buf);
-                               tipc_bclink_unlock();
+                               tipc_bclink_unlock(net);
                                goto receive;
                        }
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        tipc_named_rcv(net, buf);
                } else {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        kfree_skb(buf);
                }
                buf = NULL;
        }
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        if (deferred)
                bcl->stats.deferred_recv++;
        else
                bcl->stats.duplicates++;
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
 
 unlock:
        tipc_node_unlock(node);
 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
 {
        return (n_ptr->bclink.recv_permitted &&
-               (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
+               (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
 }
 
 
  * Returns 0 (packet sent successfully) under all circumstances,
  * since the broadcast link's pseudo-bearer never blocks
  */
-static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
+static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
+                             struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
 {
        int bp_index;
        struct tipc_msg *msg = buf_msg(buf);
-       struct net *net = sock_net(buf->sk);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bcbearer *bcbearer = tn->bcbearer;
+       struct tipc_bclink *bclink = tn->bclink;
 
        /* Prepare broadcast link message for reliable transmission,
         * if first time trying to send it;
                bcbuf_set_acks(buf, bclink->bcast_nodes.count);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tn->net_id);
-               bcl->stats.sent_info++;
+               tn->bcl->stats.sent_info++;
 
                if (WARN_ON(!bclink->bcast_nodes.count)) {
                        dump_stack();
                        u32 node, bool action)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bcbearer *bcbearer = tn->bcbearer;
        struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct tipc_bcbearer_pair *bp_curr;
        struct tipc_bearer *b;
        int b_index;
        int pri;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        if (action)
                tipc_nmap_add(nm_ptr, node);
                bp_curr++;
        }
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
 }
 
 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
        return -EMSGSIZE;
 }
 
-int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
 {
        int err;
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *prop;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
 
        if (!bcl)
                return 0;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
                          NLM_F_MULTI, TIPC_NL_LINK_GET);
        if (err)
                goto attr_msg_full;
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);
 
 attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
 msg_full:
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        genlmsg_cancel(msg->skb, hdr);
 
        return -EMSGSIZE;
 }
 
-int tipc_bclink_stats(char *buf, const u32 buf_size)
+int tipc_bclink_stats(struct net *net, char *buf, const u32 buf_size)
 {
        int ret;
        struct tipc_stats *s;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
 
        if (!bcl)
                return 0;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        s = &bcl->stats;
 
                             s->queue_sz_counts ?
                             (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        return ret;
 }
 
-int tipc_bclink_reset_stats(void)
+int tipc_bclink_reset_stats(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+
        if (!bcl)
                return -ENOPROTOOPT;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        return 0;
 }
 
-int tipc_bclink_set_queue_limits(u32 limit)
+int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+
        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
        tipc_link_set_queue_limits(bcl, limit);
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        return 0;
 }
 
 int tipc_bclink_init(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bcbearer *bcbearer;
+       struct tipc_bclink *bclink;
+       struct tipc_link *bcl;
 
        bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
        if (!bcbearer)
        spin_lock_init(&bclink->node.lock);
        __skb_queue_head_init(&bclink->node.waiting_sks);
        bcl->owner = &bclink->node;
+       bcl->owner->net = net;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->bearer_id = MAX_BEARERS;
        rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
+       tn->bcbearer = bcbearer;
+       tn->bclink = bclink;
+       tn->bcl = bcl;
        return 0;
 }
 
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       tipc_bclink_lock();
-       tipc_link_purge_queues(bcl);
-       tipc_bclink_unlock();
+       tipc_bclink_lock(net);
+       tipc_link_purge_queues(tn->bcl);
+       tipc_bclink_unlock(net);
 
        RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
        synchronize_net();
-       kfree(bcbearer);
-       kfree(bclink);
+       kfree(tn->bcbearer);
+       kfree(tn->bclink);
 }
 
 /**
 
 #ifndef _TIPC_BCAST_H
 #define _TIPC_BCAST_H
 
-#include "netlink.h"
+#include <linux/tipc_config.h>
+#include "link.h"
+#include "node.h"
 
-#define MAX_NODES 4096
-#define WSIZE 32
-#define TIPC_BCLINK_RESET 1
-
-/**
- * struct tipc_node_map - set of node identifiers
- * @count: # of nodes in set
- * @map: bitmap of node identifiers that are in the set
- */
-struct tipc_node_map {
-       u32 count;
-       u32 map[MAX_NODES / WSIZE];
-};
-
-#define PLSIZE 32
+#define TIPC_BCLINK_RESET      1
+#define PLSIZE                 32
+#define        BCBEARER                MAX_BEARERS
 
 /**
  * struct tipc_port_list - set of node local destination ports
        u32 ports[PLSIZE];
 };
 
+/**
+ * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
+ * @primary: pointer to primary bearer
+ * @secondary: pointer to secondary bearer
+ *
+ * Bearers must have same priority and same set of reachable destinations
+ * to be paired.
+ */
 
-struct tipc_node;
+struct tipc_bcbearer_pair {
+       struct tipc_bearer *primary;
+       struct tipc_bearer *secondary;
+};
 
+/**
+ * struct tipc_bcbearer - bearer used by broadcast link
+ * @bearer: (non-standard) broadcast bearer structure
+ * @media: (non-standard) broadcast media structure
+ * @bpairs: array of bearer pairs
+ * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
+ * @remains: temporary node map used by tipc_bcbearer_send()
+ * @remains_new: temporary node map used by tipc_bcbearer_send()
+ *
+ * Note: The fields labelled "temporary" are incorporated into the bearer
+ * to avoid consuming potentially limited stack space through the use of
+ * large local variables within multicast routines.  Concurrent access is
+ * prevented through use of the spinlock "bclink_lock".
+ */
+struct tipc_bcbearer {
+       struct tipc_bearer bearer;
+       struct tipc_media media;
+       struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
+       struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+       struct tipc_node_map remains;
+       struct tipc_node_map remains_new;
+};
+
+/**
+ * struct tipc_bclink - link used for broadcast messages
+ * @lock: spinlock governing access to structure
+ * @link: (non-standard) broadcast link structure
+ * @node: (non-standard) node structure representing b'cast link's peer node
+ * @flags: represent bclink states
+ * @bcast_nodes: map of broadcast-capable nodes
+ * @retransmit_to: node that most recently requested a retransmit
+ *
+ * Handles sequence numbering, fragmentation, bundling, etc.
+ */
+struct tipc_bclink {
+       spinlock_t lock;
+       struct tipc_link link;
+       struct tipc_node node;
+       unsigned int flags;
+       struct tipc_node_map bcast_nodes;
+       struct tipc_node *retransmit_to;
+};
+
+struct tipc_node;
 extern const char tipc_bclink_name[];
 
 /**
 
 int tipc_bclink_init(struct net *net);
 void tipc_bclink_stop(struct net *net);
-void tipc_bclink_set_flags(unsigned int flags);
-void tipc_bclink_add_node(u32 addr);
-void tipc_bclink_remove_node(u32 addr);
-struct tipc_node *tipc_bclink_retransmit_to(void);
+void tipc_bclink_set_flags(struct net *net, unsigned int flags);
+void tipc_bclink_add_node(struct net *net, u32 addr);
+void tipc_bclink_remove_node(struct net *net, u32 addr);
+struct tipc_node *tipc_bclink_retransmit_to(struct net *net);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
 void tipc_bclink_rcv(struct net *net, struct sk_buff *buf);
-u32  tipc_bclink_get_last_sent(void);
+u32  tipc_bclink_get_last_sent(struct net *net);
 u32  tipc_bclink_acks_missing(struct tipc_node *n_ptr);
 void tipc_bclink_update_link_state(struct net *net, struct tipc_node *n_ptr,
                                   u32 last_sent);
-int  tipc_bclink_stats(char *stats_buf, const u32 buf_size);
-int  tipc_bclink_reset_stats(void);
-int  tipc_bclink_set_queue_limits(u32 limit);
+int  tipc_bclink_stats(struct net *net, char *stats_buf, const u32 buf_size);
+int  tipc_bclink_reset_stats(struct net *net);
+int  tipc_bclink_set_queue_limits(struct net *net, u32 limit);
 void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
                        u32 node, bool action);
 uint  tipc_bclink_get_mtu(void);
 int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
 void tipc_bclink_wakeup_users(struct net *net);
-int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
 
 #endif
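
For clarity (illustrative only, not part of the patch): the conversion replaces the old
file-scope globals (bcbearer, bclink, bcl) with fields of struct tipc_net, which every
helper now resolves from the passed-in namespace via net_generic(). A minimal,
hypothetical sketch of that access pattern, assuming the tn->bcl / tn->bclink /
tn->bcbearer fields introduced above:

	/* Hypothetical helper, not from the patch: resolve the per-namespace
	 * broadcast link instead of referencing the former global "bcl".
	 */
	static struct tipc_link *example_get_bcl(struct net *net)
	{
		struct tipc_net *tn = net_generic(net, tipc_net_id);

		return tn->bcl;	/* tn->bclink and tn->bcbearer are reached the same way */
	}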