int iavf_get_vf_config(struct iavf_adapter *adapter);
 int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
 int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
 void iavf_configure_queues(struct iavf_adapter *adapter);
 void iavf_deconfigure_queues(struct iavf_adapter *adapter);
 
        adapter->rx_rings = NULL;
 }
 
+/**
+ * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
+ * @adapter: board private structure
+ *
+ * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
+ * stripped in certain descriptor fields. Instead of checking the offload
+ * capability bits in the hot path, cache the location in the ring-specific
+ * flags.
+ */
+void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_active_queues; i++) {
+               struct iavf_ring *tx_ring = &adapter->tx_rings[i];
+               struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+               /* prevent multiple L2TAG bits being set after VFR (VF reset) */
+               tx_ring->flags &=
+                       ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
+                         IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
+               rx_ring->flags &=
+                       ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
+                         IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
+
+               if (VLAN_ALLOWED(adapter)) {
+                       tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+                       rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+               } else if (VLAN_V2_ALLOWED(adapter)) {
+                       struct virtchnl_vlan_supported_caps *stripping_support;
+                       struct virtchnl_vlan_supported_caps *insertion_support;
+
+                       stripping_support =
+                               &adapter->vlan_v2_caps.offloads.stripping_support;
+                       insertion_support =
+                               &adapter->vlan_v2_caps.offloads.insertion_support;
+
+                       /* outer tag location takes precedence over inner */
+                       if (stripping_support->outer) {
+                               if (stripping_support->outer &
+                                   VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+                                       rx_ring->flags |=
+                                               IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+                               else if (stripping_support->outer &
+                                        VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
+                                       rx_ring->flags |=
+                                               IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
+                       } else if (stripping_support->inner) {
+                               if (stripping_support->inner &
+                                   VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+                                       rx_ring->flags |=
+                                               IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+                               else if (stripping_support->inner &
+                                        VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
+                                       rx_ring->flags |=
+                                               IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
+                       }
+
+                       /* same outer-before-inner preference for insertion */
+                       if (insertion_support->outer) {
+                               if (insertion_support->outer &
+                                   VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+                                       tx_ring->flags |=
+                                               IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+                               else if (insertion_support->outer &
+                                        VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
+                                       tx_ring->flags |=
+                                               IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
+                       } else if (insertion_support->inner) {
+                               if (insertion_support->inner &
+                                   VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+                                       tx_ring->flags |=
+                                               IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+                               else if (insertion_support->inner &
+                                        VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
+                                       tx_ring->flags |=
+                                               IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
+                       }
+               }
+       }
+}
+
 /**
  * iavf_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
 
        adapter->num_active_queues = num_active_queues;
 
+       iavf_set_queue_vlan_tag_loc(adapter);
+
        return 0;
 
 err_out:
 
        if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            (vlan_tag & VLAN_VID_MASK))
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+       else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
+                vlan_tag & VLAN_VID_MASK)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
 
        napi_gro_receive(&q_vector->napi, skb);
 }
                struct iavf_rx_buffer *rx_buffer;
                union iavf_rx_desc *rx_desc;
                unsigned int size;
-               u16 vlan_tag;
+               u16 vlan_tag = 0;
                u8 rx_ptype;
                u64 qword;
 
                /* populate checksum, VLAN, and protocol */
                iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 
-
-               vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
-                          le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+               if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
+                   rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
+                       vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
+               if (rx_desc->wb.qword2.ext_status &
+                   cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
+                   rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
+                       vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
 
                iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
                iavf_receive_skb(rx_ring, skb, vlan_tag);
- * Returns error code indicate the frame should be dropped upon error and the
- * otherwise  returns 0 to indicate the flags has been set properly.
+ * Checks for a VLAN tag that the stack requested insertion offload for and,
+ * based on the Tx ring's negotiated tag location, records it in @flags.
+ * Leaves @flags untouched when no tag is present or no hardware tag
+ * location was negotiated for this ring.
  **/
-static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                            struct iavf_ring *tx_ring,
-                                            u32 *flags)
+static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                      struct iavf_ring *tx_ring, u32 *flags)
 {
-       __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
 
-       if (protocol == htons(ETH_P_8021Q) &&
-           !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
-               /* When HW VLAN acceleration is turned off by the user the
-                * stack sets the protocol to 8021q so that the driver
-                * can take any steps required to support the SW only
-                * VLAN handling.  In our case the driver doesn't need
-                * to take any further steps so just set the protocol
-                * to the encapsulated ethertype.
-                */
-               skb->protocol = vlan_get_protocol(skb);
-               goto out;
-       }
 
-       /* if we have a HW VLAN tag being added, default to the HW one */
-       if (skb_vlan_tag_present(skb)) {
-               tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
-               tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
-       /* else if it is a SW VLAN, check the next protocol and store the tag */
-       } else if (protocol == htons(ETH_P_8021Q)) {
-               struct vlan_hdr *vhdr, _vhdr;
-
-               vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
-               if (!vhdr)
-                       return -EINVAL;
+       /* stack will only request hardware VLAN insertion offload for protocols
+        * that the driver supports and has enabled
+        */
+       if (!skb_vlan_tag_present(skb))
+               return;
 
-               protocol = vhdr->h_vlan_encapsulated_proto;
-               tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
-               tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
+       tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
+       if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
+               tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+       } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+               tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
+       } else {
+               dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
+               return;
        }
 
-out:
        *flags = tx_flags;
-       return 0;
 }
 
 /**
        first->gso_segs = 1;
 
        /* prepare the xmit flags */
-       if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
-               goto out_drop;
+       iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
+       if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
+               cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
+                       IAVF_TXD_CTX_QW1_CMD_SHIFT;
+               cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
+                       IAVF_TX_FLAGS_VLAN_SHIFT;
+       }
 
        /* obtain protocol of skb */
        protocol = vlan_get_protocol(skb);
 
 #define DESC_NEEDED (MAX_SKB_FRAGS + 6)
 #define IAVF_MIN_DESC_PENDING  4
 
-#define IAVF_TX_FLAGS_HW_VLAN          BIT(1)
-#define IAVF_TX_FLAGS_SW_VLAN          BIT(2)
-#define IAVF_TX_FLAGS_TSO              BIT(3)
-#define IAVF_TX_FLAGS_IPV4             BIT(4)
-#define IAVF_TX_FLAGS_IPV6             BIT(5)
-#define IAVF_TX_FLAGS_FCCRC            BIT(6)
-#define IAVF_TX_FLAGS_FSO              BIT(7)
-#define IAVF_TX_FLAGS_FD_SB            BIT(9)
-#define IAVF_TX_FLAGS_VXLAN_TUNNEL     BIT(10)
-#define IAVF_TX_FLAGS_VLAN_MASK                0xffff0000
-#define IAVF_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
-#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT  29
-#define IAVF_TX_FLAGS_VLAN_SHIFT       16
+#define IAVF_TX_FLAGS_HW_VLAN                  BIT(1)
+#define IAVF_TX_FLAGS_SW_VLAN                  BIT(2)
+#define IAVF_TX_FLAGS_TSO                      BIT(3)
+#define IAVF_TX_FLAGS_IPV4                     BIT(4)
+#define IAVF_TX_FLAGS_IPV6                     BIT(5)
+#define IAVF_TX_FLAGS_FCCRC                    BIT(6)
+#define IAVF_TX_FLAGS_FSO                      BIT(7)
+#define IAVF_TX_FLAGS_FD_SB                    BIT(9)
+#define IAVF_TX_FLAGS_VXLAN_TUNNEL             BIT(10)
+#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN     BIT(11)
+#define IAVF_TX_FLAGS_VLAN_MASK                        0xffff0000
+#define IAVF_TX_FLAGS_VLAN_PRIO_MASK           0xe0000000
+#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT          29
+#define IAVF_TX_FLAGS_VLAN_SHIFT               16
 
 struct iavf_tx_buffer {
        struct iavf_tx_desc *next_to_watch;
        u16 flags;
 #define IAVF_TXR_FLAGS_WB_ON_ITR               BIT(0)
 #define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED       BIT(1)
+#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1    BIT(3)
+#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2     BIT(4)
+#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2   BIT(5)
 
        /* stats structs */
        struct iavf_queue_stats stats;
 
                        dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
                                 __FUNCTION__);
 
+               iavf_set_queue_vlan_tag_loc(adapter);
+
                }
                break;
        case VIRTCHNL_OP_ENABLE_QUEUES: