 * ======================
  */
 
-/*
- * Record our new VLAN Group and enable/disable hardware VLAN Tag extraction
- * based on whether the specified VLAN Group pointer is NULL or not.
- */
-static void cxgb4vf_vlan_rx_register(struct net_device *dev,
-                                    struct vlan_group *grp)
-{
-       struct port_info *pi = netdev_priv(dev);
-
-       pi->vlan_grp = grp;
-       t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, grp != NULL, 0);
-}
-
 /*
  * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 
        /*
         * We do not set address filters and promiscuity here, the stack does
-        * that step explicitly.
+        * that step explicitly. Here we just enable hardware VLAN extraction.
         */
-       ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, -1,
+       ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
                              true);
        if (ret == 0) {
                ret = t4vf_change_mac(pi->adapter, pi->viid,
        return ret;
 }
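
For reference, t4vf_set_rxmode() treats -1 as "leave this setting
unchanged", so the call above now only updates the MTU and switches on
hardware VLAN extraction. A minimal annotated sketch of that call (the
per-argument notes are our reading of the signature, not from the patch):

	t4vf_set_rxmode(pi->adapter, pi->viid,
			dev->mtu,	/* new MTU */
			-1,		/* promiscuous mode: unchanged */
			-1,		/* all-multicast: unchanged */
			-1,		/* broadcast: unchanged */
			1,		/* hardware VLAN extraction: enable */
			true);		/* sleep_ok: may wait on firmware */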
 
+static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+{
+       /*
+        * Since there is no support for separate RX/TX VLAN acceleration
+        * enable/disable, make sure the TX flag is always in the same state
+        * as the RX flag.
+        */
+       if (features & NETIF_F_HW_VLAN_RX)
+               features |= NETIF_F_HW_VLAN_TX;
+       else
+               features &= ~NETIF_F_HW_VLAN_TX;
+
+       return features;
+}
+
+static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+{
+       struct port_info *pi = netdev_priv(dev);
+       u32 changed = dev->features ^ features;
+
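+       /* Hardware RX VLAN tag extraction is controlled via the rxmode. */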
+       if (changed & NETIF_F_HW_VLAN_RX)
+               t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
+                               features & NETIF_F_HW_VLAN_RX, 0);
+
+       return 0;
+}
+
 /*
  * Change the devices MAC address.
  */
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = cxgb4vf_do_ioctl,
        .ndo_change_mtu         = cxgb4vf_change_mtu,
-       .ndo_vlan_rx_register   = cxgb4vf_vlan_rx_register,
+       .ndo_fix_features       = cxgb4vf_fix_features,
+       .ndo_set_features       = cxgb4vf_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cxgb4vf_poll_controller,
 #endif
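
A rough sketch (not part of this patch) of how the core drives the two new
hooks when a user toggles a feature, e.g. "ethtool -K ethN rxvlan off": the
requested mask is first filtered through ndo_fix_features() so the driver
can enforce its constraints (here, TX VLAN acceleration tracking RX), and
only then is ndo_set_features() called to program the hardware. The real
logic lives in netdev_update_features() in net/core/dev.c; the helper below
is purely illustrative:

	static void example_feature_toggle(struct net_device *dev, u32 wanted)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_fix_features)
			wanted = ops->ndo_fix_features(dev, wanted);

		if (wanted != dev->features && ops->ndo_set_features)
			ops->ndo_set_features(dev, wanted);

		dev->features = wanted;
	}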
 
                netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                       NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+                       NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
                netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                        NETIF_F_HIGHDMA;
-               netdev->features = netdev->hw_features |
-                       NETIF_F_HW_VLAN_RX;
+               netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
 
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxq->rspq.idx);
 
-       if (unlikely(pkt->vlan_ex)) {
-               struct port_info *pi = netdev_priv(rxq->rspq.netdev);
-               struct vlan_group *grp = pi->vlan_grp;
-
-               rxq->stats.vlan_ex++;
-               if (likely(grp)) {
-                       ret = vlan_gro_frags(&rxq->rspq.napi, grp,
-                                            be16_to_cpu(pkt->vlan));
-                       goto stats;
-               }
-       }
+       if (pkt->vlan_ex) {
+               __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+               rxq->stats.vlan_ex++;
+       }
        ret = napi_gro_frags(&rxq->rspq.napi);
 
-stats:
        if (ret == GRO_HELD)
                rxq->stats.lro_pkts++;
        else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
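
Both receive paths now tag the skb with __vlan_hwaccel_put_tag() instead of
consulting a vlan_group. In this kernel generation (before the helper grew
a protocol argument) it simply records the tag in the skb for the core to
act on later, roughly:

	static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
							     u16 vlan_tci)
	{
		skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
		return skb;
	}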
                       const struct pkt_gl *gl)
 {
        struct sk_buff *skb;
-       struct port_info *pi;
        const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
        bool csum_ok = pkt->csum_calc && !pkt->err_vec;
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
        __skb_pull(skb, PKTSHIFT);
        skb->protocol = eth_type_trans(skb, rspq->netdev);
        skb_record_rx_queue(skb, rspq->idx);
-       pi = netdev_priv(skb->dev);
        rxq->stats.pkts++;
 
        if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
        } else
                skb_checksum_none_assert(skb);
 
-       /*
-        * Deliver the packet to the stack.
-        */
-       if (unlikely(pkt->vlan_ex)) {
-               struct vlan_group *grp = pi->vlan_grp;
-
+       if (pkt->vlan_ex) {
                rxq->stats.vlan_ex++;
-               if (likely(grp))
-                       vlan_hwaccel_receive_skb(skb, grp,
-                                                be16_to_cpu(pkt->vlan));
-               else
-                       dev_kfree_skb_any(skb);
-       } else
-               netif_receive_skb(skb);
+               __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+       }
+
+       netif_receive_skb(skb);
 
        return 0;
 }
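
With the tag stored in the skb, the core receive path performs the VLAN
demultiplexing itself, which is why the driver-side vlan_group bookkeeping
could be deleted. Simplified view of the core's behaviour (the helper in
net/8021q/vlan_core.c was being renamed from vlan_hwaccel_do_receive() to
vlan_do_receive() around this time):

	if (vlan_tx_tag_present(skb))
		vlan_do_receive(&skb);	/* strip tag, retarget skb->dev */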