for (i = 0; i < num_vport; i++) {
                vport->back = hdev;
                vport->vport_id = i;
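+               /* every vport starts at the default frame size */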
+               vport->mps = HCLGE_MAC_DEFAULT_FRAME;
 
                if (i == 0)
                        ret = hclge_vport_setup(vport, tqp_main_vport);
 
                if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
                        clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+               /* if the VF is not alive, fall back to the default mps */
+               if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+                       vport->mps = HCLGE_MAC_DEFAULT_FRAME;
        }
 }
 
        struct hclge_config_max_frm_size_cmd *req;
        struct hclge_desc desc;
 
-       new_mps = max(new_mps, HCLGE_MAC_DEFAULT_FRAME);
-
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
 
        req = (struct hclge_config_max_frm_size_cmd *)desc.data;
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
+
+       return hclge_set_vport_mtu(vport, new_mtu);
+}
+
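+/* validate and apply a new MTU for a vport: a VF (nonzero vport_id) only
+ * records its software mps, while the PF also reprograms the MAC max
+ * frame size and reallocates the packet buffer
+ */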
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
        struct hclge_dev *hdev = vport->back;
-       int max_frm_size, ret;
+       int i, max_frm_size, ret = 0;
 
        max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
        if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
            max_frm_size > HCLGE_MAC_MAX_FRAME)
                return -EINVAL;
 
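+       /* never program the MAC below the default frame size */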
+       max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+       mutex_lock(&hdev->vport_lock);
+       /* a VF's mps must not exceed the PF's mps (hdev->mps) */
+       if (vport->vport_id) {
+               if (max_frm_size > hdev->mps) {
+                       mutex_unlock(&hdev->vport_lock);
+                       return -EINVAL;
+               }
+
+               vport->mps = max_frm_size;
+               mutex_unlock(&hdev->vport_lock);
+               return 0;
+       }
+
+       /* the PF's mps must be no less than any VF's mps */
+       for (i = 1; i < hdev->num_alloc_vport; i++)
+               if (max_frm_size < hdev->vport[i].mps) {
+                       mutex_unlock(&hdev->vport_lock);
+                       return -EINVAL;
+               }
+
        ret = hclge_set_mac_mtu(hdev, max_frm_size);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Change mtu fail, ret =%d\n", ret);
-               return ret;
+               goto out;
        }
 
        hdev->mps = max_frm_size;
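+       /* vport 0 is the PF; keep its mps in sync with hdev->mps */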
+       vport->mps = max_frm_size;
 
        ret = hclge_buffer_alloc(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Allocate buffer fail, ret =%d\n", ret);
 
+out:
+       mutex_unlock(&hdev->vport_lock);
        return ret;
 }
 
        ae_dev->priv = hdev;
        hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
 
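+       /* serializes mps updates between the PF and its VFs */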
+       mutex_init(&hdev->vport_lock);
+
        ret = hclge_pci_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "PCI init failed\n");
        hclge_destroy_cmd_queue(&hdev->hw);
        hclge_misc_irq_uninit(hdev);
        hclge_pci_uninit(hdev);
+       mutex_destroy(&hdev->vport_lock);
        ae_dev->priv = NULL;
 }
 
 
 
        u32 pkt_buf_size; /* Total pf buf size for tx/rx */
        u32 mps; /* Max packet size */
+       /* vport_lock protects resources shared by the vports */
+       struct mutex vport_lock;
 
        struct hclge_vlan_type_cfg vlan_type_cfg;
 
 
        unsigned long state;
        unsigned long last_active_jiffies;
+       u32 mps; /* Max packet size */
 };
 
 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
 int hclge_vport_start(struct hclge_vport *vport);
 void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
 #endif
 
        vport->last_active_jiffies = jiffies;
 }
 
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+                           struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+       int ret;
+       u32 mtu;
+
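+       /* the 32-bit MTU occupies bytes 2..5 of the mailbox payload */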
+       memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
+       ret = hclge_set_vport_mtu(vport, mtu);
+
+       return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
 static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
 {
        u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
                case HCLGE_MBX_KEEP_ALIVE:
                        hclge_vf_keep_alive(vport, req);
                        break;
+               case HCLGE_MBX_SET_MTU:
+                       ret = hclge_set_vf_mtu(vport, req);
+                       if (ret)
+                               dev_err(&hdev->pdev->dev,
+                                       "VF failed to set mtu, ret = %d\n",
+                                       ret);
+                       break;
                default:
                        dev_err(&hdev->pdev->dev,
                                "un-supported mailbox message, code = %d\n",
 
                                    2, true, NULL, 0);
 }
 
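+/* the VF cannot program the MAC directly, so relay the MTU request to
+ * the PF over the mailbox and wait for its response
+ */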
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+       struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+       return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
+                                   sizeof(new_mtu), true, NULL, 0);
+}
+
 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
                                 enum hnae3_reset_notify_type type)
 {
        .ae_dev_resetting = hclgevf_ae_dev_resetting,
        .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
        .set_gro_en = hclgevf_gro_en,
+       .set_mtu = hclgevf_set_mtu,
 };
 
 static struct hnae3_ae_algo ae_algovf = {