switch (event_id) {
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               break;
+       case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+               set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
                break;
        default:
                netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
                           event_id);
-               break;
+               goto async_event_process_exit;
        }
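+       /* Kick the slow-path task only for events handled above; unhandled
+        * events skip it via the async_event_process_exit label.
+        */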
+       schedule_work(&bp->sp_task);
+async_event_process_exit:
        return 0;
 }
 
                        }
                }
        }
+       if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+               netdev_info(bp->dev, "Received PF driver unload event\n");
 }
 
 #else
 
        return rc;
 }
 
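+/* Forward an async event completion to a single VF (@vf set) or broadcast
+ * it to all VFs (@vf == NULL, target id 0xffff).
+ */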
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+                                         struct bnxt_vf_info *vf,
+                                         u16 event_id)
+{
+       struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_fwd_async_event_cmpl_input req = {0};
+       struct hwrm_async_event_cmpl *async_cmpl;
+       int rc = 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+       if (vf)
+               req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+       else
+               /* broadcast this async event to all VFs */
+               req.encap_async_event_target_id = cpu_to_le16(0xffff);
+       async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+       async_cmpl->type =
+               cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+       async_cmpl->event_id = cpu_to_le16(event_id);
+
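+       /* hwrm_cmd_lock serializes HWRM commands and protects the shared
+        * response buffer that resp points to.
+        */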
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+                          rc);
+               goto fwd_async_event_cmpl_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+fwd_async_event_cmpl_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
 void bnxt_sriov_disable(struct bnxt *bp)
 {
        u16 num_vfs = pci_num_vf(bp->pdev);
                return;
 
        if (pci_vfs_assigned(bp->pdev)) {
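+               /* VFs are still assigned to VMs and cannot be freed here;
+                * notify their drivers that the PF is about to unload.
+                */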
+               bnxt_hwrm_fwd_async_event_cmpl(
+                       bp, NULL,
+                       HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {