This patch adds support for hardware RX VLAN offload to the VF driver.
The VF uses the mailbox to request that the PF configure the hardware.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 /* set default feature to hns3 */
 static void hns3_set_default_feature(struct net_device *netdev)
 {
-       struct hnae3_handle *h = hns3_get_handle(netdev);
-
        netdev->priv_flags |= IFF_UNICAST_FLT;
 
        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
+       /* RX VLAN tag stripping (NETIF_F_HW_VLAN_CTAG_RX) is now advertised
+        * unconditionally for both PF and VF, so the VF-excluding branch
+        * below is removed.
+        *
+        * NOTE(review): the removed branch also advertised
+        * NETIF_F_HW_VLAN_CTAG_FILTER for the PF; that flag is not re-added
+        * anywhere in this hunk — confirm dropping it from hw_features is
+        * intentional and not a regression for PF VLAN filtering.
+        */
        netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-               NETIF_F_HW_VLAN_CTAG_TX |
+               NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
                NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
-       if (!(h->flags & HNAE3_SUPPORT_VF))
-               netdev->hw_features |=
-                       NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
 }
 
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 
        return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }
 
-static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
 
 
 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
                          u16 vlan_id, bool is_kill);
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
 
 int hclge_buffer_alloc(struct hclge_dev *hdev);
 int hclge_rss_init_hw(struct hclge_dev *hdev);
 
                memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
                status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
                                               vlan, is_kill);
+       } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
+               struct hnae3_handle *handle = &vport->nic;
+               bool en = mbx_req->msg[2] ? true : false;
+
+               status = hclge_en_hw_strip_rxvtag(handle, en);
        }
 
        if (gen_resp)
 
                                    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
 }
 
+/* hclgevf_en_hw_strip_rxvtag - enable/disable hardware RX VLAN tag stripping
+ * @handle: hnae3 handle of this VF
+ * @enable: true to enable stripping, false to disable
+ *
+ * The VF cannot program the hardware directly; it sends a
+ * HCLGE_MBX_SET_VLAN / HCLGE_MBX_VLAN_RX_OFF_CFG mailbox message with a
+ * one-byte payload (1 = enable, 0 = disable) and lets the PF apply the
+ * setting.  Returns the mailbox send status.
+ */
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+       struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+       u8 msg_data;
+
+       msg_data = enable ? 1 : 0;
+       /* no response payload is expected (resp buffer NULL, len 0) */
+       return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+                                   HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
+                                   1, false, NULL, 0);
+}
+
 static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        .get_tc_size = hclgevf_get_tc_size,
        .get_fw_version = hclgevf_get_fw_version,
        .set_vlan_filter = hclgevf_set_vlan_filter,
+       .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
        .reset_event = hclgevf_reset_event,
        .get_channels = hclgevf_get_channels,
        .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,