struct sk_buff_head *xmitq)
 {
        struct tipc_msg *hdr = buf_msg(skb_peek(list));
-       unsigned int maxwin = l->window;
-       int imp = msg_importance(hdr);
-       unsigned int mtu = l->mtu;
+       struct sk_buff_head *backlogq = &l->backlogq;
+       struct sk_buff_head *transmq = &l->transmq;
+       struct sk_buff *skb, *_skb;
+       u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
-       u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
-       struct sk_buff_head *transmq = &l->transmq;
-       struct sk_buff_head *backlogq = &l->backlogq;
-       struct sk_buff *skb, *_skb, **tskb;
        int pkt_cnt = skb_queue_len(list);
+       int imp = msg_importance(hdr);
+       unsigned int maxwin = l->window;
+       unsigned int mtu = l->mtu;
+       bool new_bundle;
        int rc = 0;
 
        if (unlikely(msg_size(hdr) > mtu)) {
                pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
                        skb_queue_len(list), msg_user(hdr),
                        msg_type(hdr), msg_size(hdr), mtu);
                __skb_queue_purge(list);
                return -EMSGSIZE;
        }
 
        /* Prepare each packet for sending, and add to relevant queue: */
-       while (skb_queue_len(list)) {
-               skb = skb_peek(list);
-               hdr = buf_msg(skb);
-               msg_set_seqno(hdr, seqno);
-               msg_set_ack(hdr, ack);
-               msg_set_bcast_ack(hdr, bc_ack);
-
+       while ((skb = __skb_dequeue(list))) {
                if (likely(skb_queue_len(transmq) < maxwin)) {
+                       hdr = buf_msg(skb);
+                       msg_set_seqno(hdr, seqno);
+                       msg_set_ack(hdr, ack);
+                       msg_set_bcast_ack(hdr, bc_ack);
                        _skb = skb_clone(skb, GFP_ATOMIC);
                        if (!_skb) {
+                               kfree_skb(skb);
                                __skb_queue_purge(list);
                                return -ENOBUFS;
                        }
-                       __skb_dequeue(list);
                        __skb_queue_tail(transmq, skb);
                        /* next retransmit attempt */
                        if (link_is_bc_sndlink(l))
                                TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
                        __skb_queue_tail(xmitq, _skb);
                        TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
                        l->stats.sent_pkts++;
                        seqno++;
                        continue;
                }
-               tskb = &l->backlog[imp].target_bskb;
-               if (tipc_msg_bundle(*tskb, hdr, mtu)) {
-                       kfree_skb(__skb_dequeue(list));
-                       l->stats.sent_bundled++;
-                       continue;
-               }
-               if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
-                       kfree_skb(__skb_dequeue(list));
-                       __skb_queue_tail(backlogq, *tskb);
-                       l->backlog[imp].len++;
-                       l->stats.sent_bundled++;
-                       l->stats.sent_bundles++;
+               if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
+                                       mtu - INT_H_SIZE, l->addr,
+                                       &new_bundle)) {
+                       if (skb) {
+                               /* Keep a ref. to the skb for next try */
+                               l->backlog[imp].target_bskb = skb;
+                               l->backlog[imp].len++;
+                               __skb_queue_tail(backlogq, skb);
+                       } else {
+                               if (new_bundle) {
+                                       l->stats.sent_bundles++;
+                                       l->stats.sent_bundled++;
+                               }
+                               l->stats.sent_bundled++;
+                       }
                        continue;
                }
                l->backlog[imp].target_bskb = NULL;
-               l->backlog[imp].len += skb_queue_len(list);
+               l->backlog[imp].len += (1 + skb_queue_len(list));
+               __skb_queue_tail(backlogq, skb);
                skb_queue_splice_tail_init(list, backlogq);
        }
        l->snd_nxt = seqno;
        return rc;
 }
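
For orientation, the branch above distinguishes three outcomes of tipc_msg_try_bundle(): the message was absorbed into an existing or freshly created bundle (the skb pointer comes back NULL), the message is kept as the new bundling target for later messages, or it cannot be bundled at all and falls through to the plain backlog path. A minimal, self-contained sketch of that decision logic, in plain C with hypothetical names (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the three outcomes the xmit loop above handles
 * after calling tipc_msg_try_bundle(). Illustrative only.
 */
enum bundle_outcome {
	KEEP_AS_TARGET,	/* returned true, skb still set: queue it on the
			 * backlog, it may absorb later messages */
	BUNDLED,	/* returned true, skb == NULL: message was copied
			 * into the target bundle and consumed */
	NOT_BUNDLABLE,	/* returned false: queue as-is, clear target_bskb */
};

static enum bundle_outcome classify(bool bundlable, const void *skb)
{
	if (!bundlable)
		return NOT_BUNDLABLE;
	return skb ? KEEP_AS_TARGET : BUNDLED;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(true, (void *)1),	/* KEEP_AS_TARGET */
	       classify(true, NULL),		/* BUNDLED */
	       classify(false, (void *)1));	/* NOT_BUNDLABLE */
	return 0;
}
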
 
 /**
- * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @skb: the buffer to append to ("bundle")
- * @msg:  message to be appended
- * @mtu:  max allowable size for the bundle buffer
- * Consumes buffer if successful
- * Returns true if bundling could be performed, otherwise false
+ * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
+ * @bskb: the bundle buffer to append to
+ * @msg: message to be appended
+ * @max: max allowable size for the bundle buffer
+ *
+ * Returns "true" if bundling has been performed, otherwise "false"
  */
-bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
+static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
+                           u32 max)
 {
-       struct tipc_msg *bmsg;
-       unsigned int bsz;
-       unsigned int msz = msg_size(msg);
-       u32 start, pad;
-       u32 max = mtu - INT_H_SIZE;
+       struct tipc_msg *bmsg = buf_msg(bskb);
+       u32 msz, bsz, offset, pad;
 
-       if (likely(msg_user(msg) == MSG_FRAGMENTER))
-               return false;
-       if (!skb)
-               return false;
-       bmsg = buf_msg(skb);
+       msz = msg_size(msg);
        bsz = msg_size(bmsg);
-       start = align(bsz);
-       pad = start - bsz;
+       offset = align(bsz);
+       pad = offset - bsz;
 
-       if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
+       if (unlikely(skb_tailroom(bskb) < (pad + msz)))
                return false;
-       if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
+       if (unlikely(max < (offset + msz)))
                return false;
-       if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
+
+       skb_put(bskb, pad + msz);
+       skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
+       msg_set_size(bmsg, offset + msz);
+       msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+       return true;
+}
+
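
The rewritten tipc_msg_bundle() above appends the new message at the 4-byte aligned end of the current bundle and bumps the bundle's message count. A standalone sketch of the offset/pad arithmetic, assuming align() rounds a size up to the next 4-byte boundary (align4() below is a hypothetical stand-in for the kernel helper, and the sizes are made up):

#include <stdio.h>

static unsigned int align4(unsigned int x)
{
	return (x + 3) & ~3u;	/* assumed 4-byte rounding, like align() */
}

int main(void)
{
	unsigned int bsz = 1399;		/* current bundle size */
	unsigned int msz = 100;			/* new message size */
	unsigned int offset = align4(bsz);	/* 1400 */
	unsigned int pad = offset - bsz;	/* 1 padding byte */

	/* The bundle grows by pad + msz bytes and the new message is
	 * copied in at 'offset'; bundling succeeds only if the buffer
	 * has that much tailroom and offset + msz does not exceed max.
	 */
	printf("offset=%u pad=%u resulting size=%u\n",
	       offset, pad, offset + msz);
	return 0;
}
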
+/**
+ * tipc_msg_try_bundle - Try to bundle a new message to the last one
+ * @tskb: the last/target message to which the new one will be appended
+ * @skb: the new message skb pointer
+ * @mss: max message size (header inclusive)
+ * @dnode: destination node for the message
+ * @new_bundle: set to true if this call created a new bundle, false otherwise
+ *
+ * Return: "true" if the new message skb is a candidate for bundling, either
+ * now or later. If bundling has been done this time, the skb is consumed and
+ * the skb pointer is set to NULL.
+ * Otherwise, "false" if the skb cannot be bundled at all.
+ */
+bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
+                        u32 dnode, bool *new_bundle)
+{
+       struct tipc_msg *msg, *inner, *outer;
+       u32 tsz;
+
+       /* First, check if the new buffer is suitable for bundling */
+       msg = buf_msg(*skb);
+       if (msg_user(msg) == MSG_FRAGMENTER)
                return false;
-       if (unlikely(skb_tailroom(skb) < (pad + msz)))
+       if (msg_user(msg) == TUNNEL_PROTOCOL)
                return false;
-       if (unlikely(max < (start + msz)))
+       if (msg_user(msg) == BCAST_PROTOCOL)
                return false;
-       if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
-           (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
+       if (mss <= INT_H_SIZE + msg_size(msg))
                return false;
 
-       skb_put(skb, pad + msz);
-       skb_copy_to_linear_data_offset(skb, start, msg, msz);
-       msg_set_size(bmsg, start + msz);
-       msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+       /* OK, but does a last/target buffer exist yet to bundle into? */
+       if (unlikely(!tskb))
+               return true;
+
+       /* Is it a bundle already? Try to bundle the new message to it */
+       if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
+               *new_bundle = false;
+               goto bundle;
+       }
+
+       /* Make a new bundle of the two messages if possible */
+       tsz = msg_size(buf_msg(tskb));
+       if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
+               return true;
+       if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
+                                     GFP_ATOMIC)))
+               return true;
+       inner = buf_msg(tskb);
+       skb_push(tskb, INT_H_SIZE);
+       outer = buf_msg(tskb);
+       tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
+                     dnode);
+       msg_set_importance(outer, msg_importance(inner));
+       msg_set_size(outer, INT_H_SIZE + tsz);
+       msg_set_msgcnt(outer, 1);
+       *new_bundle = true;
+
+bundle:
+       if (likely(tipc_msg_bundle(tskb, msg, mss))) {
+               consume_skb(*skb);
+               *skb = NULL;
+       }
        return true;
 }
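
The "make a new bundle" path above converts the target message in place: pskb_expand_head() reserves INT_H_SIZE of extra headroom plus enough tailroom, skb_push() prepends the outer MSG_BUNDLER header, and tipc_msg_bundle() then appends the new message behind it. A rough, self-contained check of the size test guarding that path, using assumed example values (mss = 1460 and a 40-byte internal header, both hypothetical here):

#include <stdbool.h>
#include <stdio.h>

#define HDR_SIZE 40u	/* assumed stand-in for INT_H_SIZE */

static unsigned int align4(unsigned int x)
{
	return (x + 3) & ~3u;
}

/* Models the check "mss < align(INT_H_SIZE + tsz) + msg_size(msg)":
 * the first message gets an outer header prepended, is padded to a
 * 4-byte boundary, and the second message must still fit under mss.
 */
static bool two_fit(unsigned int tsz, unsigned int msz, unsigned int mss)
{
	return mss >= align4(HDR_SIZE + tsz) + msz;
}

int main(void)
{
	printf("700 + 700: %s\n", two_fit(700, 700, 1460) ? "bundle" : "no");
	printf("900 + 700: %s\n", two_fit(900, 700, 1460) ? "bundle" : "no");
	return 0;
}
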
 
        return false;
 }
 
-/**
- * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
- * @list: the buffer chain, where head is the buffer to replace/append
- * @skb: buffer to be created, appended to and returned in case of success
- * @msg: message to be appended
- * @mtu: max allowable size for the bundle buffer, inclusive header
- * @dnode: destination node for message. (Not always present in header)
- * Returns true if success, otherwise false
- */
-bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
-                         u32 mtu, u32 dnode)
-{
-       struct sk_buff *_skb;
-       struct tipc_msg *bmsg;
-       u32 msz = msg_size(msg);
-       u32 max = mtu - INT_H_SIZE;
-
-       if (msg_user(msg) == MSG_FRAGMENTER)
-               return false;
-       if (msg_user(msg) == TUNNEL_PROTOCOL)
-               return false;
-       if (msg_user(msg) == BCAST_PROTOCOL)
-               return false;
-       if (msz > (max / 2))
-               return false;
-
-       _skb = tipc_buf_acquire(max, GFP_ATOMIC);
-       if (!_skb)
-               return false;
-
-       skb_trim(_skb, INT_H_SIZE);
-       bmsg = buf_msg(_skb);
-       tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
-                     INT_H_SIZE, dnode);
-       msg_set_importance(bmsg, msg_importance(msg));
-       msg_set_seqno(bmsg, msg_seqno(msg));
-       msg_set_ack(bmsg, msg_ack(msg));
-       msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
-       tipc_msg_bundle(_skb, msg, mtu);
-       *skb = _skb;
-       return true;
-}
-
 /**
  * tipc_msg_reverse(): swap source and destination addresses and add error code
  * @own_node: originating node id for reversed message