}
 
 #if CP_VLAN_TAG_USED
-       if (cp->vlgrp && vlan_tx_tag_present(skb))
+       if (vlan_tx_tag_present(skb))
                vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
 #endif
 
 
        lp->tx_ring[tx_index].tx_flags = 0;
 
 #if AMD8111E_VLAN_TAG_USED
-       if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
+       if (vlan_tx_tag_present(skb)) {
                lp->tx_ring[tx_index].tag_ctrl_cmd |=
                                cpu_to_le16(TCC_VLAN_INSERT);
                lp->tx_ring[tx_index].tag_ctrl_info =
 
                return NETDEV_TX_OK;
        }
 
-       if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+       if (unlikely(vlan_tx_tag_present(skb))) {
                u16 vlan = vlan_tx_tag_get(skb);
                __le16 tag;
 
 
 
        tpd = atl1e_get_tpd(adapter);
 
-       if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+       if (unlikely(vlan_tx_tag_present(skb))) {
                u16 vlan_tag = vlan_tx_tag_get(skb);
                u16 atl1e_vlan_tag;
 
 
                (u16) atomic_read(&tpd_ring->next_to_use));
        memset(ptpd, 0, sizeof(struct tx_packet_desc));
 
-       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
                        ((vlan_tag >> 9) & 0x8);
 
                offset = ((u32)(skb->len-copy_len + 3) & ~3);
        }
 #ifdef NETIF_F_HW_VLAN_TX
-       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                u16 vlan_tag = vlan_tx_tag_get(skb);
                vlan_tag = (vlan_tag << 4) |
                        (vlan_tag >> 13) |
 
 }
 
 static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
-               bool vlan, u32 wrb_cnt, u32 len)
+               u32 wrb_cnt, u32 len)
 {
        memset(hdr, 0, sizeof(*hdr));
 
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }
 
-       if (vlan && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
                        hdr, vlan_tx_tag_get(skb));
                queue_head_inc(txq);
        }
 
-       wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
-               wrb_cnt, copied);
+       wrb_fill_hdr(hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));
 
        return copied;
 
                htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
                       BNA_TXQ_WI_SEND));
 
-       if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                vlan_tag = (u16) vlan_tx_tag_get(skb);
                flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
        }
 
        }
 
 #ifdef BCM_VLAN
-       if (bp->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
 
           pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
 
 #ifdef BCM_VLAN
-       if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
-           (bp->flags & HW_VLAN_TX_FLAG)) {
+       if (vlan_tx_tag_present(skb)) {
                tx_start_bd->vlan_or_ethertype =
                    cpu_to_le16(vlan_tx_tag_get(skb));
                tx_start_bd->bd_flags.as_bitfield |=
 
        cpl->iff = dev->if_port;
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-       if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                cpl->vlan_valid = 1;
                cpl->vlan = htons(vlan_tx_tag_get(skb));
                st->vlan_insert++;
 
        cpl->len = htonl(skb->len);
        cntrl = V_TXPKT_INTF(pi->port_id);
 
-       if (vlan_tx_tag_present(skb) && pi->vlan_grp)
+       if (vlan_tx_tag_present(skb))
                cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
 
        tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
                qs->port_stats[SGE_PSTAT_TX_CSUM]++;
        if (skb_shinfo(skb)->gso_size)
                qs->port_stats[SGE_PSTAT_TSO]++;
-       if (vlan_tx_tag_present(skb) && pi->vlan_grp)
+       if (vlan_tx_tag_present(skb))
                qs->port_stats[SGE_PSTAT_VLANINS]++;
 
        /*
 
                }
        }
 
-       if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+       if (unlikely(vlan_tx_tag_present(skb))) {
                tx_flags |= E1000_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
        }
 
        if (e1000_maybe_stop_tx(netdev, count + 2))
                return NETDEV_TX_BUSY;
 
-       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                tx_flags |= E1000_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
        }
 
        }
        pr->swqe_id_counter += 1;
 
-       if (port->vgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
                swqe->vlan_tag = vlan_tx_tag_get(skb);
        }
 
        int vlan_tag_insert = 0;
        int loopback = 0;
 
-       if (enic->vlan_group && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                /* VLAN tag from trunking driver */
                vlan_tag_insert = 1;
                vlan_tag = vlan_tx_tag_get(skb);
 
                         NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
 
        /* vlan tag */
-       if (likely(!np->vlangrp)) {
+       if (vlan_tx_tag_present(skb))
+               start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
+                                       vlan_tx_tag_get(skb));
+       else
                start_tx->txvlan = 0;
-       } else {
-               if (vlan_tx_tag_present(skb))
-                       start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
-               else
-                       start_tx->txvlan = 0;
-       }
 
        spin_lock_irqsave(&np->lock, flags);
 
 
 
        /* make space for additional header when fcb is needed */
        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-                       (priv->vlgrp && vlan_tx_tag_present(skb)) ||
+                       vlan_tx_tag_present(skb) ||
                        unlikely(do_tstamp)) &&
                        (skb_headroom(skb) < GMAC_FCB_LEN)) {
                struct sk_buff *skb_new;
                gfar_tx_checksum(skb, fcb);
        }
 
-       if (priv->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                if (unlikely(NULL == fcb)) {
                        fcb = gfar_add_fcb(skb);
                        lstatus |= BD_LFLAG(TXBD_TOE);
 
                tx_flags |= IGB_TX_FLAGS_TSTAMP;
        }
 
-       if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
+       if (vlan_tx_tag_present(skb)) {
                tx_flags |= IGB_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
        }
 
        int count = 0;
        unsigned int f;
 
-       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                        tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
 
 
        tx_ring = &adapter->tx_ring[r_idx];
 
-       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
 
        /* If we support per priority flow control and the packet contains
         * a vlan tag, send the packet to the TX ring assigned to that priority
         */
-       if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
+       if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
                vlan_tag = vlan_tx_tag_get(skb);
                return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
        }
 
        tx_ind = skb->queue_mapping;
        ring = &priv->tx_ring[tx_ind];
-       if (priv->vlgrp && vlan_tx_tag_present(skb))
+       if (vlan_tx_tag_present(skb))
                vlan_tag = vlan_tx_tag_get(skb);
 
        /* Check available TXBBs And 2K spare for prefetch */
 
 
        mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
 
-       if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
                             "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
                mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
 
 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
                                      struct sk_buff *skb)
 {
-       return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
+       return (vlan_tx_tag_present(skb)) ?
                TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 }
 
 
        }
 
        queue = 0;
-       if (sp->vlgrp && vlan_tx_tag_present(skb))
+       if (vlan_tx_tag_present(skb))
                vlan_tag = vlan_tx_tag_get(skb);
        if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
                if (skb->protocol == htons(ETH_P_IP)) {
 
        ctrl = 0;
 #ifdef SKY2_VLAN_TAG_USED
        /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
-       if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                if (!le) {
                        le = get_tx_le(sky2, &slot);
                        le->addr = 0;
 
        }
 
 #if TG3_VLAN_TAG_USED
-       if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
+       if (vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
 #endif
                }
        }
 #if TG3_VLAN_TAG_USED
-       if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
+       if (vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
 #endif
 
 
        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
 
-       if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
                td_ptr->tdesc1.TCR |= TCR0_VETAG;
        }
 
                dev->name, __func__, __LINE__,
                fifo_hw, dtr, dtr_priv);
 
-       if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                u16 vlan_tag = vlan_tx_tag_get(skb);
                vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
        }
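
Every hunk above applies the same mechanical change: the driver's private VLAN-group pointer (vlgrp, vlan_grp, vlan_group, vgrp, vlangrp) is no longer consulted on transmit, and tag insertion is keyed off vlan_tx_tag_present(skb) alone. Below is a minimal sketch of the resulting transmit-path pattern; the foo_* names, the FOO_TX_VLAN flag and the descriptor layout are hypothetical illustrations, not code from any of the drivers touched here.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Hypothetical descriptor, for illustration only. */
struct foo_tx_desc {
	__le32 flags;
	__le16 vlan_tag;
};

#define FOO_TX_VLAN	0x1

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_tx_desc desc = { 0 };
	u32 flags = 0;

	/*
	 * Previously a driver would also test its own group pointer,
	 * e.g. "if (priv->vlgrp && vlan_tx_tag_present(skb))"; after
	 * this patch the skb test alone decides whether a tag is inserted.
	 */
	if (vlan_tx_tag_present(skb)) {
		flags |= FOO_TX_VLAN;
		desc.vlan_tag = cpu_to_le16(vlan_tx_tag_get(skb));
	}

	desc.flags = cpu_to_le32(flags);

	/* ... map the skb and hand desc to the hardware ring here ... */
	return NETDEV_TX_OK;
}

Each driver above follows this same shape, only with its own flag bits and descriptor fields.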