 void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
                                 bool writen_to_tbl)
 {
        struct hclge_vport_vlan_cfg *vlan, *tmp;
+       struct hclge_dev *hdev = vport->back;
 
-       list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
-               if (vlan->vlan_id == vlan_id)
+       mutex_lock(&hdev->vport_lock);
+
+       list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+               if (vlan->vlan_id == vlan_id) {
+                       mutex_unlock(&hdev->vport_lock);
                        return;
+               }
+       }
 
        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
-       if (!vlan)
+       if (!vlan) {
+               mutex_unlock(&hdev->vport_lock);
                return;
+       }
 
        vlan->hd_tbl_status = writen_to_tbl;
        vlan->vlan_id = vlan_id;
 
        list_add_tail(&vlan->node, &vport->vlan_list);
+       mutex_unlock(&hdev->vport_lock);
 }
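With the lock held across the whole lookup-then-insert sequence, every exit from hclge_add_vport_vlan_table() now has to drop hdev->vport_lock, which is why the single-statement loop and the kzalloc() failure check both grow braces in this hunk. A minimal userspace sketch of the unlock-on-every-exit pattern, with hypothetical names (entry_add, list_lock) standing in for the driver's, compiled with -pthread:

#include <pthread.h>
#include <stdlib.h>

struct entry {
        int id;
        struct entry *next;
};

static struct entry *entry_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void entry_add(int id)
{
        struct entry *e;

        pthread_mutex_lock(&list_lock);

        /* duplicate check: first early-exit path, must unlock */
        for (e = entry_list; e; e = e->next) {
                if (e->id == id) {
                        pthread_mutex_unlock(&list_lock);
                        return;
                }
        }

        e = calloc(1, sizeof(*e));
        if (!e) {
                /* allocation failure: second early-exit path */
                pthread_mutex_unlock(&list_lock);
                return;
        }

        e->id = id;
        e->next = entry_list;
        entry_list = e;
        pthread_mutex_unlock(&list_lock);       /* normal exit */
}

int main(void)
{
        entry_add(1);
        entry_add(1);   /* duplicate: exercises the early return */
        return 0;
}

Missing any one of these unlocks would deadlock the next caller, which is the failure mode the extra braces in the hunk above guard against.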
 
 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
 {
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;
        int ret;
 
+       mutex_lock(&hdev->vport_lock);
+
        list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                if (!vlan->hd_tbl_status) {
                        ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
                                                       vport->vport_id,
                                                       vlan->vlan_id, false);
                        if (ret) {
                                dev_err(&hdev->pdev->dev,
                                        "restore vport vlan list failed, ret=%d\n",
                                        ret);
+
+                               mutex_unlock(&hdev->vport_lock);
                                return ret;
                        }
                }
                vlan->hd_tbl_status = true;
        }
 
+       mutex_unlock(&hdev->vport_lock);
+
        return 0;
 }
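hclge_add_vport_all_vlan_table() has the same obligation on its error path: the lock is dropped before "return ret". An equivalent and common kernel-style alternative is a single exit funneled through a goto label; a sketch under hypothetical names (restore_all, program_entry), not the driver's actual code:

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the hardware write; fails on negative ids */
static int program_entry(int id)
{
        return id < 0 ? -1 : 0;
}

static int restore_all(const int *ids, int n)
{
        int ret = 0;
        int i;

        pthread_mutex_lock(&list_lock);
        for (i = 0; i < n; i++) {
                ret = program_entry(ids[i]);
                if (ret)
                        goto unlock;    /* every failure takes one exit */
        }
unlock:
        pthread_mutex_unlock(&list_lock);
        return ret;
}

int main(void)
{
        const int ids[] = { 1, 2, 3 };

        return restore_all(ids, 3);
}

With only one early return per function, the patch's per-path unlocks and this centralized form are equally correct; the goto shape just scales better as exit paths multiply.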
 
 void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
                                bool is_write_tbl)
 {
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;
 
+       mutex_lock(&hdev->vport_lock);
+
        list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                if (vlan->vlan_id == vlan_id) {
                        if (is_write_tbl && vlan->hd_tbl_status)
                                hclge_set_vlan_filter_hw(hdev,
                                                         htons(ETH_P_8021Q),
                                                         vport->vport_id,
                                                         vlan_id,
                                                         true);

                        list_del(&vlan->node);
                        kfree(vlan);
                        break;
                }
        }
+
+       mutex_unlock(&hdev->vport_lock);
 }
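hclge_rm_vport_vlan_table() frees the matching node while walking the list, which is why these loops all use list_for_each_entry_safe(): the iterator caches the next node in "tmp" before the body runs, so freeing the current node cannot poison the traversal. The same idea in a self-contained userspace sketch (hypothetical names):

#include <stdlib.h>

struct entry {
        int id;
        struct entry *next;
};

static void entry_remove(struct entry **head, int id)
{
        struct entry **prev = head;
        struct entry *e = *head, *tmp;

        while (e) {
                tmp = e->next;  /* save the successor before any free() */
                if (e->id == id) {
                        *prev = tmp;    /* unlink first, then free */
                        free(e);
                        break;
                }
                prev = &e->next;
                e = tmp;
        }
}

int main(void)
{
        struct entry *head = NULL;

        entry_remove(&head, 42);        /* no-op on an empty list */
        return 0;
}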
 
 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
 {
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;
 
+       mutex_lock(&hdev->vport_lock);
+
        list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                if (vlan->hd_tbl_status)
                        hclge_set_vlan_filter_hw(hdev,
                                                 htons(ETH_P_8021Q),
                                                 vport->vport_id,
                                                 vlan->vlan_id,
                                                 true);

                vlan->hd_tbl_status = false;
                if (is_del_list) {
                        list_del(&vlan->node);
                        kfree(vlan);
                }
        }
        clear_bit(vport->vport_id, hdev->vf_vlan_full);
+       mutex_unlock(&hdev->vport_lock);
 }
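hclge_rm_vport_all_vlan_table() distinguishes two cases: with is_del_list false it only clears hd_tbl_status, keeping the software list around so the entries can be re-programmed after a reset, while with is_del_list true it also unlinks and frees them. A compact sketch of that split (hypothetical names):

#include <stdbool.h>
#include <stdlib.h>

struct entry {
        bool hw_programmed;
        struct entry *next;
};

static void clear_entries(struct entry **head, bool del_list)
{
        struct entry *e = *head, *tmp;

        while (e) {
                tmp = e->next;
                e->hw_programmed = false;       /* hw copy is gone either way */
                if (del_list) {
                        free(e);        /* drop the software entry too */
                        *head = tmp;
                }
                e = tmp;
        }
}

int main(void)
{
        struct entry *head = NULL;

        clear_entries(&head, true);
        return 0;
}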
 
 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
 {
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_vport *vport;
        int i;
 
+       mutex_lock(&hdev->vport_lock);
+
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                vport = &hdev->vport[i];
                list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                        list_del(&vlan->node);
                        kfree(vlan);
                }
        }
+
+       mutex_unlock(&hdev->vport_lock);
 }
 
 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
 {
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;
        int ret;
 
+       mutex_lock(&hdev->vport_lock);
+
        if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
                list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                        ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
                                                       vport->vport_id,
                                                       vlan->vlan_id, false);
                        if (ret)
                                break;
                        vlan->hd_tbl_status = true;
                }
        }
+
+       mutex_unlock(&hdev->vport_lock);
 }
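hclge_restore_vport_vlan_table() is the consumer of that retained list: after a reset has wiped the hardware table, it walks the software entries, writes each one back, and flags it as present in hardware again. Shape-wise, with hypothetical names and a trivial stand-in for the hardware write:

#include <stdbool.h>
#include <stddef.h>

struct entry {
        int id;
        bool hw_programmed;
        struct entry *next;
};

/* stand-in for the hardware write */
static int program_entry(int id)
{
        (void)id;
        return 0;
}

static int restore_entries(struct entry *head)
{
        int ret;

        for (; head; head = head->next) {
                ret = program_entry(head->id);
                if (ret)
                        return ret;
                head->hw_programmed = true;     /* back in the hw table */
        }
        return 0;
}

int main(void)
{
        return restore_entries(NULL);
}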
 
 /* For global reset and imp reset, hardware will clear the mac table,
        hclge_misc_irq_uninit(hdev);
        hclge_devlink_uninit(hdev);
        hclge_pci_uninit(hdev);
-       mutex_destroy(&hdev->vport_lock);
        hclge_uninit_vport_vlan_table(hdev);
+       mutex_destroy(&hdev->vport_lock);
        ae_dev->priv = NULL;
 }
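The final hunk is an ordering fix rather than new locking: hclge_uninit_vport_vlan_table() now takes hdev->vport_lock, so mutex_destroy() must move after it, since destroying a mutex that a later call still locks is undefined behavior. The pthread analogue of the corrected teardown order (hypothetical names):

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* still takes list_lock internally, like
 * hclge_uninit_vport_vlan_table() after this patch
 */
static void table_uninit(void)
{
        pthread_mutex_lock(&list_lock);
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        table_uninit();                         /* last user of the lock ... */
        pthread_mutex_destroy(&list_lock);      /* ... only then destroy it */
        return 0;
}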