bnxt_en: Send PF driver unload notification to all VFs.
author    Jeffrey Huang <huangjw@broadcom.com>
          Fri, 26 Feb 2016 09:00:00 +0000 (04:00 -0500)
committer Chuck Anderson <chuck.anderson@oracle.com>
          Thu, 7 Jul 2016 00:36:56 +0000 (17:36 -0700)
Orabug: 23221795

During remove_one() when SRIOV is enabled, the PF driver
should broadcast a PF driver unload notification to all
VFs that are attached to VMs. Upon receiving the PF
driver unload notification, the VF driver should print
a warning message to the message log. Certain operations
on the VF may not succeed after the PF has unloaded.

Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 19241368443ff976b1924019d29eef8e972158e7)
Signed-off-by: Brian Maly <brian.maly@oracle.com>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
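
In outline: bnxt_sriov_disable() broadcasts an HWRM_FWD_ASYNC_EVENT_CMPL
request with target ID 0xffff, firmware replays it to each VF as an async
completion, and the VF's bnxt_async_event_process() latches
BNXT_HWRM_PF_UNLOAD_SP_EVENT and schedules the sp_task worker, which logs
the message from bnxt_cfg_ntp_filters(). A condensed sketch of the VF-side
dispatch after this patch (editor's summary of the first hunk below, error
handling elided):

	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		/* latch the event for the slow-path worker */
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	default:
		/* unknown events no longer schedule the worker */
		goto async_event_process_exit;
	}
	schedule_work(&bp->sp_task);
async_event_process_exit:
	return 0;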

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d5eb799ee613136c4afa357792f0219974375bd5..44492c652303d8bfea634df1a3c3f000f996f232 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1243,13 +1243,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
        switch (event_id) {
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               break;
+       case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+               set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
                break;
        default:
                netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
                           event_id);
-               break;
+               goto async_event_process_exit;
        }
+       schedule_work(&bp->sp_task);
+async_event_process_exit:
        return 0;
 }
 
@@ -5618,6 +5622,8 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
                        }
                }
        }
+       if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+               netdev_info(bp->dev, "Receive PF driver unload event!");
 }
 
 #else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2be51b332652b0d0fd63629795599c10e6c25dea..cc798d5a3527ea0077fffde7b5258b801036e04f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -986,6 +986,7 @@ struct bnxt {
 #define BNXT_VXLAN_DEL_PORT_SP_EVENT   5
 #define BNXT_RESET_TASK_SP_EVENT       6
 #define BNXT_RST_RING_SP_EVENT         7
+#define BNXT_HWRM_PF_UNLOAD_SP_EVENT   8
 
        struct bnxt_pf_info     pf;
 #ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0b4ca35cee9bd3fbb8931978fd906e474287708a..9f6b4cc6bd0ed7a42ec23a5d1bc9667149879172 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -522,6 +522,46 @@ err_out1:
        return rc;
 }
 
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+                                         struct bnxt_vf_info *vf,
+                                         u16 event_id)
+{
+       int rc = 0;
+       struct hwrm_fwd_async_event_cmpl_input req = {0};
+       struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_async_event_cmpl *async_cmpl;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+       if (vf)
+               req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+       else
+               /* broadcast this async event to all VFs */
+               req.encap_async_event_target_id = cpu_to_le16(0xffff);
+       async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+       async_cmpl->type =
+               cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+       async_cmpl->event_id = cpu_to_le16(event_id);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+                          rc);
+               goto fwd_async_event_cmpl_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+fwd_async_event_cmpl_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
 void bnxt_sriov_disable(struct bnxt *bp)
 {
        u16 num_vfs = pci_num_vf(bp->pdev);
@@ -530,6 +570,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
                return;
 
        if (pci_vfs_assigned(bp->pdev)) {
+               bnxt_hwrm_fwd_async_event_cmpl(
+                       bp, NULL,
+                       HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {
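
For reference, a hypothetical caller-side sketch (not part of this commit):
passing a bnxt_vf_info pointer instead of NULL makes the helper address that
VF's fw_fid rather than the 0xffff broadcast ID. The bp->pf.vf[] array and
the index 0 below are illustrative assumptions:

	/* hypothetical: notify a single VF instead of broadcasting */
	struct bnxt_vf_info *vf = &bp->pf.vf[0];

	if (bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD))
		netdev_warn(bp->dev, "failed to notify VF of PF unload\n");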