]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
bnx2x: add afex support
authorJoe Jin <joe.jin@oracle.com>
Mon, 27 Aug 2012 13:09:05 +0000 (21:09 +0800)
committerJoe Jin <joe.jin@oracle.com>
Wed, 29 Aug 2012 00:22:01 +0000 (08:22 +0800)
The following patch adds afex multifunction support to the driver (afex
multifunction is based on vntag header) and updates the FW version used to 7.2.51.

Support includes the following:

1. Configure vif parameters in firmware (default vlan, vif id, default
   priority, allowed priorities) according to values received from NIC.
2. Configure FW to strip/add default vlan according to afex vlan mode.
3. Notify link up to OS only after vif is fully initialized.
4. Support vif list set/get requests and configure FW accordingly.
5. Supply afex statistics upon request from NIC.
6. Special handling to L2 interface in case of FCoE vif.

(backported from upstream commit a334872224a67b614dc888460377862621f3dac7)
Signed-off-by: Barak Witkowski <barak@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Joe Jin <joe.jin@oracle.com>
14 files changed:
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_cmn.h
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bnx2x/bnx2x_fw_defs.h
drivers/net/bnx2x/bnx2x_hsi.h
drivers/net/bnx2x/bnx2x_init.h
drivers/net/bnx2x/bnx2x_link.c
drivers/net/bnx2x/bnx2x_link.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bnx2x/bnx2x_sp.c
drivers/net/bnx2x/bnx2x_sp.h
drivers/net/bnx2x/bnx2x_stats.c
drivers/net/bnx2x/bnx2x_stats.h

index eefbe29c2d19150e2599c15ca44cd78d7d62f127..49bf41f9d8b38d494344f089848831fac76d7aad 100644 (file)
@@ -1062,6 +1062,13 @@ struct bnx2x_slowpath {
                struct flow_control_configuration pfc_config;
        } func_rdata;
 
+       /* afex ramrod can not be a part of func_rdata union because these
+        * events might arrive in parallel to other events from func_rdata.
+        * Therefore, if they would have been defined in the same union,
+        * data can get corrupted.
+        */
+       struct afex_vif_list_ramrod_data func_afex_rdata;
+
        /* used by dmae command executer */
        struct dmae_command             dmae[MAX_DMAE_C];
 
@@ -1178,6 +1185,7 @@ struct bnx2x_fw_stats_data {
 enum {
        BNX2X_SP_RTNL_SETUP_TC,
        BNX2X_SP_RTNL_TX_TIMEOUT,
+       BNX2X_SP_RTNL_AFEX_F_UPDATE,
        BNX2X_SP_RTNL_FAN_FAILURE,
 };
 
@@ -1343,13 +1351,14 @@ struct bnx2x {
 
        u32                     mf_config[E1HVN_MAX];
        bool                    prev_max_cfg_invalid[E1HVN_MAX];
-       u32                     mf2_config[E2_FUNC_MAX];
+       u32                     mf_ext_config;
        u32                     path_has_ovlan; /* E3 */
        u16                     mf_ov;
        u8                      mf_mode;
 #define IS_MF(bp)              (bp->mf_mode != 0)
 #define IS_MF_SI(bp)           (bp->mf_mode == MULTI_FUNCTION_SI)
 #define IS_MF_SD(bp)           (bp->mf_mode == MULTI_FUNCTION_SD)
+#define IS_MF_AFEX(bp)         (bp->mf_mode == MULTI_FUNCTION_AFEX)
 
        u8                      wol;
 
@@ -1592,6 +1601,9 @@ struct bnx2x {
        struct dcbx_features                    dcbx_remote_feat;
        u32                                     dcbx_remote_flags;
 #endif
+       /* AFEX: store default vlan used */
+       int                                     afex_def_vlan_tag;
+       enum mf_cfg_afex_vlan_mode              afex_vlan_mode;
        u32                                     pending_max;
 
        /* multiple tx classes of service */
@@ -2148,9 +2160,16 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
 #define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
 #define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
 
+#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp)  ((bp)->mf_ext_config & \
+                                        MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp))
 #define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
                                (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
                                 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+#else
+#define IS_MF_FCOE_AFEX(bp)    false
 #endif
 
+
 #endif /* bnx2x.h */
index 83b7a8356c55081f03935e17963eda908d778274..da5e569cb0a08bf9801f6b360f9f60735289b475 100644 (file)
@@ -1450,8 +1450,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
        bp->num_queues = bnx2x_calc_num_queues(bp);
 
 #ifdef BCM_CNIC
-       /* override in STORAGE SD mode */
-       if (IS_MF_STORAGE_SD(bp))
+       /* override in STORAGE SD modes */
+       if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
                bp->num_queues = 1;
 #endif
        /* Add special queues */
@@ -1883,8 +1883,14 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                        SHMEM2_WR(bp, dcc_support,
                                  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
                                   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+               if (SHMEM2_HAS(bp, afex_driver_support))
+                       SHMEM2_WR(bp, afex_driver_support,
+                                 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
        }
 
+       /* Set AFEX default VLAN tag to an invalid value */
+       bp->afex_def_vlan_tag = -1;
+
        bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
        rc = bnx2x_func_start(bp);
        if (rc) {
@@ -3052,7 +3058,8 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
        }
 
 #ifdef BCM_CNIC
-       if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) {
+       if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
+           !is_zero_ether_addr(addr->sa_data)) {
                BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
                return -EINVAL;
        }
@@ -3173,7 +3180,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
        int rx_ring_size = 0;
 
 #ifdef BCM_CNIC
-       if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) {
+       if (!bp->rx_ring_size &&
+           (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
                rx_ring_size = MIN_RX_SIZE_NONTPA;
                bp->rx_ring_size = rx_ring_size;
        } else
index 7eebd8c7799fb544081b79623e0e4dac0be3abe4..a77aad2c5b15bcc5777a840043d1a63e22d39ab5 100644 (file)
@@ -1694,7 +1694,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
        if (is_valid_ether_addr(addr))
                return true;
 #ifdef BCM_CNIC
-       if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp))
+       if (is_zero_ether_addr(addr) &&
+           (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
                return true;
 #endif
        return false;
index 23ddeea472805640b5ce3d9ee462704d05e31aaa..b60c49a6ef94760ed00eff2b8bfe995441e1060c 100644 (file)
@@ -1441,7 +1441,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
        ering->rx_mini_pending = 0;
        ering->rx_jumbo_pending = 0;
 
-       ering->tx_max_pending = MAX_TX_AVAIL;
+       ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
        ering->tx_pending = bp->tx_ring_size;
 }
 
@@ -1459,7 +1459,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
        if ((ering->rx_pending > MAX_RX_AVAIL) ||
            (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
                                                    MIN_RX_SIZE_TPA)) ||
-           (ering->tx_pending > MAX_TX_AVAIL) ||
+           (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
            (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
                DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
                return -EINVAL;
index cd6dfa9eaa3aa24c122316c9e4d052ca93d6c47d..45f908141e5a9f32dea77a9a086502f930bfd101 100644 (file)
 
 #define STATS_QUERY_CMD_COUNT 16
 
-#define NIV_LIST_TABLE_SIZE 4096
+#define AFEX_LIST_TABLE_SIZE 4096
 
 #define INVALID_VNIC_ID        0xFF
 
index 799272d164e5f74dfa28c5e8b86d01ef36b3db9a..a440a8ba85f29a39de016f98c3c06206dc2ef608 100644 (file)
@@ -833,6 +833,7 @@ struct shared_feat_cfg {             /* NVRAM Offset */
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF      0x00000100
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
+               #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE      0x00000400
 
        /* The interval in seconds between sending LLDP packets. Set to zero
           to disable the feature */
@@ -1235,6 +1236,8 @@ struct drv_func_mb {
        #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL     0x00050006
        #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
        #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
+       #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED        0xa2000000
+       #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED        0x00070002
        #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
        #define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
 
@@ -1242,6 +1245,13 @@ struct drv_func_mb {
        #define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
 
        #define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+
+       #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC         0xd0000000
+       #define DRV_MSG_CODE_AFEX_LISTGET_ACK           0xd1000000
+       #define DRV_MSG_CODE_AFEX_LISTSET_ACK           0xd2000000
+       #define DRV_MSG_CODE_AFEX_STATSGET_ACK          0xd3000000
+       #define DRV_MSG_CODE_AFEX_VIFSET_ACK            0xd4000000
+
        #define DRV_MSG_CODE_DRV_INFO_ACK               0xd8000000
        #define DRV_MSG_CODE_DRV_INFO_NACK              0xd9000000
 
@@ -1299,6 +1309,14 @@ struct drv_func_mb {
        #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
        #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
        #define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+       #define FW_MSG_CODE_HW_SET_INVALID_IMAGE        0xb0100000
+
+       #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE     0xd0100000
+       #define FW_MSG_CODE_AFEX_LISTGET_ACK            0xd1100000
+       #define FW_MSG_CODE_AFEX_LISTSET_ACK            0xd2100000
+       #define FW_MSG_CODE_AFEX_STATSGET_ACK           0xd3100000
+       #define FW_MSG_CODE_AFEX_VIFSET_ACK             0xd4100000
+
        #define FW_MSG_CODE_DRV_INFO_ACK                0xd8100000
        #define FW_MSG_CODE_DRV_INFO_NACK               0xd9100000
 
@@ -1357,6 +1375,12 @@ struct drv_func_mb {
 
        #define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
        #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
+       #define DRV_STATUS_AFEX_EVENT_MASK              0x03f00000
+       #define DRV_STATUS_AFEX_LISTGET_REQ             0x00100000
+       #define DRV_STATUS_AFEX_LISTSET_REQ             0x00200000
+       #define DRV_STATUS_AFEX_STATSGET_REQ            0x00400000
+       #define DRV_STATUS_AFEX_VIFSET_REQ              0x00800000
+
        #define DRV_STATUS_DRV_INFO_REQ                 0x04000000
 
        u32 virt_mac_upper;
@@ -1448,7 +1472,26 @@ struct func_mf_cfg {
        #define FUNC_MF_CFG_E1HOV_TAG_SHIFT             0
        #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT         FUNC_MF_CFG_E1HOV_TAG_MASK
 
-       u32 reserved[2];
+       /* afex default VLAN ID - 12 bits */
+       #define FUNC_MF_CFG_AFEX_VLAN_MASK              0x0fff0000
+       #define FUNC_MF_CFG_AFEX_VLAN_SHIFT             16
+
+       u32 afex_config;
+       #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK                     0x000000ff
+       #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT                    0
+       #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK                    0x0000ff00
+       #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT                   8
+       #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL                     0x00000100
+       #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK                      0x000f0000
+       #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT                     16
+
+       u32 reserved;
+};
+
+enum mf_cfg_afex_vlan_mode {
+       FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
+       FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
+       FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
 };
 
 /* This structure is not applicable and should not be accessed on 57711 */
@@ -1945,18 +1988,29 @@ struct shmem2_region {
 
        u32 nvm_retain_bitmap_addr;                     /* 0x0070 */
 
-       u32     reserved1;      /* 0x0074 */
+       /* afex support of that driver */
+       u32 afex_driver_support;                        /* 0x0074 */
+       #define SHMEM_AFEX_VERSION_MASK                  0x100f
+       #define SHMEM_AFEX_SUPPORTED_VERSION_ONE         0x1001
+       #define SHMEM_AFEX_REDUCED_DRV_LOADED            0x8000
 
-       u32     reserved2[E2_FUNC_MAX];
+       /* driver receives addr in scratchpad to which it should respond */
+       u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX];
 
-       u32     reserved3[E2_FUNC_MAX];/* 0x0088 */
-       u32     reserved4[E2_FUNC_MAX];/* 0x0098 */
+       /* generic params from MCP to driver (value depends on the msg sent
+        * to driver
+        */
+       u32 afex_param1_to_driver[E2_FUNC_MAX];         /* 0x0088 */
+       u32 afex_param2_to_driver[E2_FUNC_MAX];         /* 0x0098 */
 
        u32 swim_base_addr;                             /* 0x0108 */
        u32 swim_funcs;
        u32 swim_main_cb;
 
-       u32     reserved5[2];
+       /* bitmap notifying which VIF profiles stored in nvram are enabled by
+        * switch
+        */
+       u32 afex_profiles_enabled[2];
 
        /* generic flags controlled by the driver */
        u32 drv_flags;
@@ -2696,10 +2750,51 @@ union drv_info_to_mcp {
        struct fcoe_stats_info  fcoe_stat;
        struct iscsi_stats_info iscsi_stat;
 };
+
+/* stats collected for afex.
+ * NOTE: structure is exactly as expected to be received by the switch.
+ *       order must remain exactly as is unless protocol changes !
+ */
+struct afex_stats {
+       u32 tx_unicast_frames_hi;
+       u32 tx_unicast_frames_lo;
+       u32 tx_unicast_bytes_hi;
+       u32 tx_unicast_bytes_lo;
+       u32 tx_multicast_frames_hi;
+       u32 tx_multicast_frames_lo;
+       u32 tx_multicast_bytes_hi;
+       u32 tx_multicast_bytes_lo;
+       u32 tx_broadcast_frames_hi;
+       u32 tx_broadcast_frames_lo;
+       u32 tx_broadcast_bytes_hi;
+       u32 tx_broadcast_bytes_lo;
+       u32 tx_frames_discarded_hi;
+       u32 tx_frames_discarded_lo;
+       u32 tx_frames_dropped_hi;
+       u32 tx_frames_dropped_lo;
+
+       u32 rx_unicast_frames_hi;
+       u32 rx_unicast_frames_lo;
+       u32 rx_unicast_bytes_hi;
+       u32 rx_unicast_bytes_lo;
+       u32 rx_multicast_frames_hi;
+       u32 rx_multicast_frames_lo;
+       u32 rx_multicast_bytes_hi;
+       u32 rx_multicast_bytes_lo;
+       u32 rx_broadcast_frames_hi;
+       u32 rx_broadcast_frames_lo;
+       u32 rx_broadcast_bytes_hi;
+       u32 rx_broadcast_bytes_lo;
+       u32 rx_frames_discarded_hi;
+       u32 rx_frames_discarded_lo;
+       u32 rx_frames_dropped_hi;
+       u32 rx_frames_dropped_lo;
+};
+
 #define BCM_5710_FW_MAJOR_VERSION                      7
 #define BCM_5710_FW_MINOR_VERSION                      2
-#define BCM_5710_FW_REVISION_VERSION           16
-#define BCM_5710_FW_ENGINEERING_VERSION                0
+#define BCM_5710_FW_REVISION_VERSION                   51
+#define BCM_5710_FW_ENGINEERING_VERSION                        0
 #define BCM_5710_FW_COMPILE_FLAGS                      1
 
 
@@ -3389,7 +3484,7 @@ struct client_init_tx_data {
 #define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
 #define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
        u8 default_vlan_flg;
-       u8 reserved2;
+       u8 force_default_pri_flg;
        __le32 reserved3;
 };
 
@@ -4374,9 +4469,22 @@ struct fcoe_statistics_params {
 };
 
 
+/*
+ * The data afex vif list ramrod need
+ */
+struct afex_vif_list_ramrod_data {
+       u8 afex_vif_list_command;
+       u8 func_bit_map;
+       __le16 vif_list_index;
+       u8 func_to_clear;
+       u8 echo;
+       __le16 reserved1;
+};
+
+
 /*
  * cfc delete event data
-*/
+ */
 struct cfc_del_event_data {
        u32 cid;
        u32 reserved0;
@@ -4521,7 +4629,7 @@ enum common_spqe_cmd_id {
        RAMROD_CMD_ID_COMMON_STAT_QUERY,
        RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
        RAMROD_CMD_ID_COMMON_START_TRAFFIC,
-       RAMROD_CMD_ID_COMMON_RESERVED1,
+       RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
        MAX_COMMON_SPQE_CMD_ID
 };
 
@@ -4728,6 +4836,17 @@ struct malicious_vf_event_data {
        u32 reserved3;
 };
 
+/*
+ * vif list event data
+ */
+struct vif_list_event_data {
+       u8 func_bit_map;
+       u8 echo;
+       __le16 reserved0;
+       __le32 reserved1;
+       __le32 reserved2;
+};
+
 /*
  * union for all event ring message types
  */
@@ -4737,6 +4856,7 @@ union event_data {
        struct cfc_del_event_data cfc_del_event;
        struct vf_flr_event_data vf_flr_event;
        struct malicious_vf_event_data malicious_vf_event;
+       struct vif_list_event_data vif_list_event;
 };
 
 
@@ -4802,7 +4922,7 @@ enum event_ring_opcode {
        EVENT_RING_OPCODE_FORWARD_SETUP,
        EVENT_RING_OPCODE_RSS_UPDATE_RULES,
        EVENT_RING_OPCODE_FUNCTION_UPDATE,
-       EVENT_RING_OPCODE_RESERVED1,
+       EVENT_RING_OPCODE_AFEX_VIF_LISTS,
        EVENT_RING_OPCODE_SET_MAC,
        EVENT_RING_OPCODE_CLASSIFICATION_RULES,
        EVENT_RING_OPCODE_FILTERS_RULES,
@@ -4849,12 +4969,27 @@ struct flow_control_configuration {
 struct function_start_data {
        __le16 function_mode;
        __le16 sd_vlan_tag;
-       u16 reserved;
+       __le16 vif_id;
        u8 path_id;
        u8 network_cos_mode;
 };
 
 
+struct function_update_data {
+       u8 vif_id_change_flg;
+       u8 afex_default_vlan_change_flg;
+       u8 allowed_priorities_change_flg;
+       u8 network_cos_mode_change_flg;
+       __le16 vif_id;
+       __le16 afex_default_vlan;
+       u8 allowed_priorities;
+       u8 network_cos_mode;
+       u8 lb_mode_en;
+       u8 reserved0;
+       __le32 reserved1;
+};
+
+
 /*
  * FW version stored in the Xstorm RAM
  */
@@ -5052,7 +5187,7 @@ enum mf_mode {
        SINGLE_FUNCTION,
        MULTI_FUNCTION_SD,
        MULTI_FUNCTION_SI,
-       MULTI_FUNCTION_RESERVED,
+       MULTI_FUNCTION_AFEX,
        MAX_MF_MODE
 };
 
@@ -5177,6 +5312,7 @@ union protocol_common_specific_data {
        u8 protocol_data[8];
        struct regpair phy_address;
        struct regpair mac_config_addr;
+       struct afex_vif_list_ramrod_data afex_vif_list_data;
 };
 
 /*
@@ -5355,6 +5491,18 @@ enum vf_pf_channel_state {
 };
 
 
+/*
+ * vif_list_rule_kind
+ */
+enum vif_list_rule_kind {
+       VIF_LIST_RULE_SET,
+       VIF_LIST_RULE_GET,
+       VIF_LIST_RULE_CLEAR_ALL,
+       VIF_LIST_RULE_CLEAR_FUNC,
+       MAX_VIF_LIST_RULE_KIND
+};
+
+
 /*
  * zone A per-queue data
  */
index 2b7a2bd0592c5962325bd7592a281aeb6554e153..559c396d45cce465ae77999fa0d22508b6d60624 100644 (file)
@@ -125,7 +125,7 @@ enum {
        MODE_MF                        = 0x00000100,
        MODE_MF_SD                     = 0x00000200,
        MODE_MF_SI                     = 0x00000400,
-       MODE_MF_NIV                    = 0x00000800,
+       MODE_MF_AFEX                   = 0x00000800,
        MODE_E3_A0                     = 0x00001000,
        MODE_E3_B0                     = 0x00002000,
        MODE_COS3                      = 0x00004000,
index cb3e05a86b464dd2f255186b75f6bc926d552e30..4b2fc849e070aa7e5789ab4cc57fb6118ff67e06 100644 (file)
@@ -6800,6 +6800,10 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        else
                rc = bnx2x_update_link_down(params, vars);
 
+       /* Update MCP link status was changed */
+       if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
+               bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
+
        return rc;
 }
 
index 4c090f28bcfcc5b03bf6b457987aeee766974850..0e7f87e526885c46afc0e00869ee4a35a793a10a 100644 (file)
@@ -254,6 +254,7 @@ struct link_params {
 #define FEATURE_CONFIG_PFC_ENABLED                     (1<<1)
 #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY                (1<<2)
 #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY       (1<<3)
+#define FEATURE_CONFIG_BC_SUPPORTS_AFEX                        (1<<8)
 #define FEATURE_CONFIG_AUTOGREEEN_ENABLED                      (1<<9)
 #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED             (1<<10)
 #define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET                (1<<11)
index cbd0496672489f5be633216e28679d010c816892..c8ccdc922d3daab8d836dfd47192d23cca4313d6 100644 (file)
@@ -1646,6 +1646,27 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 
        DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 
+       if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
+           (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
+               /* if Q update ramrod is completed for last Q in AFEX vif set
+                * flow, then ACK MCP at the end
+                *
+                * mark pending ACK to MCP bit.
+                * prevent case that both bits are cleared.
+                * At the end of load/unload driver checks that
+                * sp_state is cleared, and this order prevents
+                * races
+                */
+               smp_mb__before_clear_bit();
+               set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
+               wmb();
+               clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+               smp_mb__after_clear_bit();
+
+               /* schedule workqueue to send ack to MCP */
+               queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+       }
+
        return;
 }
 
@@ -2265,6 +2286,13 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
                bp->mf_config[vn] =
                        MF_CFG_RD(bp, func_mf_config[func].config);
        }
+       if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+               DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
+               bp->flags |= MF_FUNC_DIS;
+       } else {
+               DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+               bp->flags &= ~MF_FUNC_DIS;
+       }
 }
 
 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
@@ -2374,6 +2402,190 @@ void bnx2x__link_status_update(struct bnx2x *bp)
        bnx2x_link_report(bp);
 }
 
+static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
+                                 u16 vlan_val, u8 allowed_prio)
+{
+       struct bnx2x_func_state_params func_params = {0};
+       struct bnx2x_func_afex_update_params *f_update_params =
+               &func_params.params.afex_update;
+
+       func_params.f_obj = &bp->func_obj;
+       func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
+
+       /* no need to wait for RAMROD completion, so don't
+        * set RAMROD_COMP_WAIT flag
+        */
+
+       f_update_params->vif_id = vifid;
+       f_update_params->afex_default_vlan = vlan_val;
+       f_update_params->allowed_priorities = allowed_prio;
+
+       /* if ramrod can not be sent, response to MCP immediately */
+       if (bnx2x_func_state_change(bp, &func_params) < 0)
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+
+       return 0;
+}
+
+static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
+                                         u16 vif_index, u8 func_bit_map)
+{
+       struct bnx2x_func_state_params func_params = {0};
+       struct bnx2x_func_afex_viflists_params *update_params =
+               &func_params.params.afex_viflists;
+       int rc;
+       u32 drv_msg_code;
+
+       /* validate only LIST_SET and LIST_GET are received from switch */
+       if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
+               BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
+                         cmd_type);
+
+       func_params.f_obj = &bp->func_obj;
+       func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
+
+       /* set parameters according to cmd_type */
+       update_params->afex_vif_list_command = cmd_type;
+       update_params->vif_list_index = cpu_to_le16(vif_index);
+       update_params->func_bit_map =
+               (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
+       update_params->func_to_clear = 0;
+       drv_msg_code =
+               (cmd_type == VIF_LIST_RULE_GET) ?
+               DRV_MSG_CODE_AFEX_LISTGET_ACK :
+               DRV_MSG_CODE_AFEX_LISTSET_ACK;
+
+       /* if ramrod can not be sent, respond to MCP immediately for
+        * SET and GET requests (other are not triggered from MCP)
+        */
+       rc = bnx2x_func_state_change(bp, &func_params);
+       if (rc < 0)
+               bnx2x_fw_command(bp, drv_msg_code, 0);
+
+       return 0;
+}
+
+static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
+{
+       struct afex_stats afex_stats;
+       u32 func = BP_ABS_FUNC(bp);
+       u32 mf_config;
+       u16 vlan_val;
+       u32 vlan_prio;
+       u16 vif_id;
+       u8 allowed_prio;
+       u8 vlan_mode;
+       u32 addr_to_write, vifid, addrs, stats_type, i;
+
+       if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
+               vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+               DP(BNX2X_MSG_MCP,
+                  "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
+               bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
+       }
+
+       if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
+               vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+               addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
+               DP(BNX2X_MSG_MCP,
+                  "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
+                  vifid, addrs);
+               bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
+                                              addrs);
+       }
+
+       if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
+               addr_to_write = SHMEM2_RD(bp,
+                       afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
+               stats_type = SHMEM2_RD(bp,
+                       afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+
+               DP(BNX2X_MSG_MCP,
+                  "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
+                  addr_to_write);
+
+               bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
+
+               /* write response to scratchpad, for MCP */
+               for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
+                       REG_WR(bp, addr_to_write + i*sizeof(u32),
+                              *(((u32 *)(&afex_stats))+i));
+
+               /* send ack message to MCP */
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
+       }
+
+       if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
+               mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
+               bp->mf_config[BP_VN(bp)] = mf_config;
+               DP(BNX2X_MSG_MCP,
+                  "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
+                  mf_config);
+
+               /* if VIF_SET is "enabled" */
+               if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
+                       /* set rate limit directly to internal RAM */
+                       struct cmng_init_input cmng_input;
+                       struct rate_shaping_vars_per_vn m_rs_vn;
+                       size_t size = sizeof(struct rate_shaping_vars_per_vn);
+                       u32 addr = BAR_XSTRORM_INTMEM +
+                           XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
+
+                       bp->mf_config[BP_VN(bp)] = mf_config;
+
+                       bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
+                       m_rs_vn.vn_counter.rate =
+                               cmng_input.vnic_max_rate[BP_VN(bp)];
+                       m_rs_vn.vn_counter.quota =
+                               (m_rs_vn.vn_counter.rate *
+                                RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+                       __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
+
+                       /* read relevant values from mf_cfg struct in shmem */
+                       vif_id =
+                               (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+                                FUNC_MF_CFG_E1HOV_TAG_MASK) >>
+                               FUNC_MF_CFG_E1HOV_TAG_SHIFT;
+                       vlan_val =
+                               (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+                                FUNC_MF_CFG_AFEX_VLAN_MASK) >>
+                               FUNC_MF_CFG_AFEX_VLAN_SHIFT;
+                       vlan_prio = (mf_config &
+                                    FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
+                                   FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
+                       vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
+                       vlan_mode =
+                               (MF_CFG_RD(bp,
+                                          func_mf_config[func].afex_config) &
+                                FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
+                               FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
+                       allowed_prio =
+                               (MF_CFG_RD(bp,
+                                          func_mf_config[func].afex_config) &
+                                FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
+                               FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
+
+                       /* send ramrod to FW, return in case of failure */
+                       if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
+                                                  allowed_prio))
+                               return;
+
+                       bp->afex_def_vlan_tag = vlan_val;
+                       bp->afex_vlan_mode = vlan_mode;
+               } else {
+                       /* notify link down because BP->flags is disabled */
+                       bnx2x_link_report(bp);
+
+                       /* send INVALID VIF ramrod to FW */
+                       bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
+
+                       /* Reset the default afex VLAN */
+                       bp->afex_def_vlan_tag = -1;
+               }
+       }
+}
+
 static void bnx2x_pmf_update(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
@@ -2519,8 +2731,11 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
        if (IS_MF_SD(bp))
                __set_bit(BNX2X_Q_FLG_OV, &flags);
 
-       if (IS_FCOE_FP(fp))
+       if (IS_FCOE_FP(fp)) {
                __set_bit(BNX2X_Q_FLG_FCOE, &flags);
+               /* For FCoE - force usage of default priority (for afex) */
+               __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
+       }
 
        if (!fp->disable_tpa) {
                __set_bit(BNX2X_Q_FLG_TPA, &flags);
@@ -2537,6 +2752,10 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
        /* Always set HW VLAN stripping */
        __set_bit(BNX2X_Q_FLG_VLAN, &flags);
 
+       /* configure silent vlan removal */
+       if (IS_MF_AFEX(bp))
+               __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
+
 
        return flags | bnx2x_get_common_flags(bp, fp, true);
 }
@@ -2639,6 +2858,13 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
                rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
        else
                rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+       /* configure silent vlan removal
+        * if multi function mode is afex, then mask default vlan
+        */
+       if (IS_MF_AFEX(bp)) {
+               rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
+               rxq_init->silent_removal_mask = VLAN_VID_MASK;
+       }
 }
 
 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
@@ -3445,6 +3671,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                        int func = BP_FUNC(bp);
 
                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+                       bnx2x_read_mf_cfg(bp);
                        bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
                                        func_mf_config[BP_ABS_FUNC(bp)].config);
                        val = SHMEM_RD(bp,
@@ -3467,6 +3694,9 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                                /* start dcbx state machine */
                                bnx2x_dcbx_set_params(bp,
                                        BNX2X_DCBX_STATE_NEG_RECEIVED);
+                       if (val & DRV_STATUS_AFEX_EVENT_MASK)
+                               bnx2x_handle_afex_cmd(bp,
+                                       val & DRV_STATUS_AFEX_EVENT_MASK);
                        if (bp->link_vars.periodic_flags &
                            PERIODIC_FLAGS_LINK_EVENT) {
                                /*  sync with link */
@@ -4394,6 +4624,93 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
        netif_addr_unlock_bh(bp->dev);
 }
 
+static inline void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
+                                             union event_ring_elem *elem)
+{
+       if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
+               DP(BNX2X_MSG_SP,
+                  "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
+                  elem->message.data.vif_list_event.func_bit_map);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
+                       elem->message.data.vif_list_event.func_bit_map);
+       } else if (elem->message.data.vif_list_event.echo ==
+                  VIF_LIST_RULE_SET) {
+               DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
+       }
+}
+
+/* called with rtnl_lock */
+static inline void bnx2x_after_function_update(struct bnx2x *bp)
+{
+       int q, rc;
+       struct bnx2x_fastpath *fp;
+       struct bnx2x_queue_state_params queue_params = {NULL};
+       struct bnx2x_queue_update_params *q_update_params =
+               &queue_params.params.update;
+
+       /* Send Q update command with afex vlan removal values  for all Qs */
+       queue_params.cmd = BNX2X_Q_CMD_UPDATE;
+
+       /* set silent vlan removal values according to vlan mode */
+       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+                 &q_update_params->update_flags);
+       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+                 &q_update_params->update_flags);
+       __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+       /* in access mode mark mask and value are 0 to strip all vlans */
+       if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
+               q_update_params->silent_removal_value = 0;
+               q_update_params->silent_removal_mask = 0;
+       } else {
+               q_update_params->silent_removal_value =
+                       (bp->afex_def_vlan_tag & VLAN_VID_MASK);
+               q_update_params->silent_removal_mask = VLAN_VID_MASK;
+       }
+
+       for_each_eth_queue(bp, q) {
+               /* Set the appropriate Queue object */
+               fp = &bp->fp[q];
+               queue_params.q_obj = &fp->q_obj;
+
+               /* send the ramrod */
+               rc = bnx2x_queue_state_change(bp, &queue_params);
+               if (rc < 0)
+                       BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+                                 q);
+       }
+
+#ifdef BCM_CNIC
+       if (!NO_FCOE(bp)) {
+               fp = &bp->fp[FCOE_IDX];
+               queue_params.q_obj = &fp->q_obj;
+
+               /* clear pending completion bit */
+               __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+               /* mark latest Q bit */
+               smp_mb__before_clear_bit();
+               set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+               smp_mb__after_clear_bit();
+
+               /* send Q update ramrod for FCoE Q */
+               rc = bnx2x_queue_state_change(bp, &queue_params);
+               if (rc < 0)
+                       BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+                                 q);
+       } else {
+               /* If no FCoE ring - ACK MCP now */
+               bnx2x_link_report(bp);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+       }
+#else
+       /* If no FCoE ring - ACK MCP now */
+       bnx2x_link_report(bp);
+       bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+#endif /* BCM_CNIC */
+}
+
 static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
        struct bnx2x *bp, u32 cid)
 {
@@ -4492,6 +4809,28 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                                break;
                        bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
                        goto next_spqe;
+               case EVENT_RING_OPCODE_FUNCTION_UPDATE:
+                       DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+                          "AFEX: ramrod completed FUNCTION_UPDATE\n");
+                       f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
+
+                       /* We will perform the Queues update from sp_rtnl task
+                        * as all Queue SP operations should run under
+                        * rtnl_lock.
+                        */
+                       smp_mb__before_clear_bit();
+                       set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
+                               &bp->sp_rtnl_state);
+                       smp_mb__after_clear_bit();
+
+                       schedule_delayed_work(&bp->sp_rtnl_task, 0);
+                       goto next_spqe;
+
+               case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
+                       f_obj->complete_cmd(bp, f_obj,
+                                           BNX2X_F_CMD_AFEX_VIFLISTS);
+                       bnx2x_after_afex_vif_lists(bp, elem);
+                       goto next_spqe;
                case EVENT_RING_OPCODE_FUNCTION_START:
                        DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
                           "got FUNC_START ramrod\n");
@@ -4623,6 +4962,13 @@ static void bnx2x_sp_task(struct work_struct *work)
 
        bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
             le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+
+       /* afex - poll to check if VIFSET_ACK should be sent to MFW */
+       if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
+                              &bp->sp_state)) {
+               bnx2x_link_report(bp);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+       }
 }
 
 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -6094,12 +6440,24 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
        if (!CHIP_IS_E1(bp))
                REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
 
-       if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
-               /* Bit-map indicating which L2 hdrs may appear
-                * after the basic Ethernet header
-                */
-               REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
-                      bp->path_has_ovlan ? 7 : 6);
+       if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
+               if (IS_MF_AFEX(bp)) {
+                       /* configure that VNTag and VLAN headers must be
+                        * received in afex mode
+                        */
+                       REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
+                       REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
+                       REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
+                       REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
+                       REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
+               } else {
+                       /* Bit-map indicating which L2 hdrs may appear
+                        * after the basic Ethernet header
+                        */
+                       REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+                              bp->path_has_ovlan ? 7 : 6);
+               }
+       }
 
        bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
        bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
@@ -6133,9 +6491,21 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
        bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
        bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
 
-       if (!CHIP_IS_E1x(bp))
-               REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
-                      bp->path_has_ovlan ? 7 : 6);
+       if (!CHIP_IS_E1x(bp)) {
+               if (IS_MF_AFEX(bp)) {
+                       /* configure that VNTag and VLAN headers must be
+                        * sent in afex mode
+                        */
+                       REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
+                       REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
+                       REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
+                       REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
+                       REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
+               } else {
+                       REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+                              bp->path_has_ovlan ? 7 : 6);
+               }
+       }
 
        REG_WR(bp, SRC_REG_SOFT_RST, 1);
 
@@ -6353,15 +6723,29 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 
 
        bnx2x_init_block(bp, BLOCK_PRS, init_phase);
-       if (CHIP_IS_E3B0(bp))
-               /* Ovlan exists only if we are in multi-function +
-                * switch-dependent mode, in switch-independent there
-                * is no ovlan headers
-                */
-               REG_WR(bp, BP_PORT(bp) ?
-                      PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
-                      PRS_REG_HDRS_AFTER_BASIC_PORT_0,
-                      (bp->path_has_ovlan ? 7 : 6));
+       if (CHIP_IS_E3B0(bp)) {
+               if (IS_MF_AFEX(bp)) {
+                       /* configure headers for AFEX mode */
+                       REG_WR(bp, BP_PORT(bp) ?
+                              PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+                              PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
+                       REG_WR(bp, BP_PORT(bp) ?
+                              PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
+                              PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
+                       REG_WR(bp, BP_PORT(bp) ?
+                              PRS_REG_MUST_HAVE_HDRS_PORT_1 :
+                              PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
+               } else {
+                       /* Ovlan exists only if we are in multi-function +
+                        * switch-dependent mode, in switch-independent there
+                        * is no ovlan headers
+                        */
+                       REG_WR(bp, BP_PORT(bp) ?
+                              PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+                              PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+                              (bp->path_has_ovlan ? 7 : 6));
+               }
+       }
 
        bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
        bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
@@ -6423,10 +6807,15 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
                /* Bit-map indicating which L2 hdrs may appear after the
                 * basic Ethernet header
                 */
-               REG_WR(bp, BP_PORT(bp) ?
-                          NIG_REG_P1_HDRS_AFTER_BASIC :
-                          NIG_REG_P0_HDRS_AFTER_BASIC,
-                          IS_MF_SD(bp) ? 7 : 6);
+               if (IS_MF_AFEX(bp))
+                       REG_WR(bp, BP_PORT(bp) ?
+                              NIG_REG_P1_HDRS_AFTER_BASIC :
+                              NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
+               else
+                       REG_WR(bp, BP_PORT(bp) ?
+                              NIG_REG_P1_HDRS_AFTER_BASIC :
+                              NIG_REG_P0_HDRS_AFTER_BASIC,
+                              IS_MF_SD(bp) ? 7 : 6);
 
                if (CHIP_IS_E3(bp))
                        REG_WR(bp, BP_PORT(bp) ?
@@ -6448,6 +6837,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
                                val = 1;
                                break;
                        case MULTI_FUNCTION_SI:
+                       case MULTI_FUNCTION_AFEX:
                                val = 2;
                                break;
                        }
@@ -7034,7 +7424,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
        unsigned long ramrod_flags = 0;
 
 #ifdef BCM_CNIC
-       if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) {
+       if (is_zero_ether_addr(bp->dev->dev_addr) &&
+           (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
                DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
                   "Ignoring Zero MAC for STORAGE SD mode\n");
                return 0;
@@ -8571,7 +8962,8 @@ sp_rtnl_not_reset:
 #endif
        if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
                bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
-
+       if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
+               bnx2x_after_function_update(bp);
        /*
         * in case of fan failure we need to reset id if the "stop on error"
         * debug flag is set, since we trying to prevent permanent overheating
@@ -9148,7 +9540,9 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
                FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
-
+       bp->link_params.feature_config_flags |=
+               (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
+               FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
                FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
@@ -9780,6 +10174,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 
                        } else
                                bp->flags |= NO_FCOE_FLAG;
+
+                       bp->mf_ext_config = cfg;
+
                } else { /* SD MODE */
                        if (IS_MF_STORAGE_SD(bp)) {
                                if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
@@ -9801,6 +10198,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                                memset(bp->dev->dev_addr, 0, ETH_ALEN);
                        }
                }
+
+               if (IS_MF_FCOE_AFEX(bp))
+                       /* use FIP MAC as primary MAC */
+                       memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+
 #endif
        } else {
                /* in SF read MACs from port configuration */
@@ -9973,6 +10375,19 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
                                } else
                                        BNX2X_DEV_INFO("illegal MAC address for SI\n");
                                break;
+                       case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
+                               if ((!CHIP_IS_E1x(bp)) &&
+                                   (MF_CFG_RD(bp, func_mf_config[func].
+                                              mac_upper) != 0xffff) &&
+                                   (SHMEM2_HAS(bp,
+                                               afex_driver_support))) {
+                                       bp->mf_mode = MULTI_FUNCTION_AFEX;
+                                       bp->mf_config[vn] = MF_CFG_RD(bp,
+                                               func_mf_config[func].config);
+                               } else {
+                                       BNX2X_DEV_INFO("can not configure afex mode\n");
+                               }
+                               break;
                        case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
                                /* get OV configuration */
                                val = MF_CFG_RD(bp,
@@ -10013,6 +10428,9 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
                                return -EPERM;
                        }
                        break;
+               case MULTI_FUNCTION_AFEX:
+                       BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
+                       break;
                case MULTI_FUNCTION_SI:
                        BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
                                       func);
@@ -10180,6 +10598,9 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
                case MULTI_FUNCTION_SI:
                        SET_FLAGS(flags, MODE_MF_SI);
                        break;
+               case MULTI_FUNCTION_AFEX:
+                       SET_FLAGS(flags, MODE_MF_AFEX);
+                       break;
                }
        } else
                SET_FLAGS(flags, MODE_SF);
@@ -10242,7 +10663,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
        bp->disable_tpa = disable_tpa;
 
 #ifdef BCM_CNIC
-       bp->disable_tpa |= IS_MF_STORAGE_SD(bp);
+       bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
 #endif
 
        /* Set TPA flags */
@@ -10261,7 +10682,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 
        bp->mrrs = mrrs;
 
-       bp->tx_ring_size = MAX_TX_AVAIL;
+       bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
 
        /* make sure that the numbers are in the right granularity */
        bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -11098,6 +11519,8 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
        bnx2x_init_func_obj(bp, &bp->func_obj,
                            bnx2x_sp(bp, func_rdata),
                            bnx2x_sp_mapping(bp, func_rdata),
+                           bnx2x_sp(bp, func_afex_rdata),
+                           bnx2x_sp_mapping(bp, func_afex_rdata),
                            &bnx2x_func_sp_drv);
 }
 
index b72851d1bfc46e415ebf6a6225f9cd0b8a5d6873..9234eeaaa70789e51da575e7bc1038dc41d360d8 100644 (file)
@@ -634,14 +634,17 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
 }
 
 
-static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
-                                bool add, unsigned char *dev_addr, int index)
+void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+                         bool add, unsigned char *dev_addr, int index)
 {
        u32 wb_data[2];
        u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
                         NIG_REG_LLH0_FUNC_MEM;
 
-       if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE)
+       if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
+               return;
+
+       if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
                return;
 
        DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
@@ -4399,6 +4402,9 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
                test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
        tx_data->anti_spoofing_flg =
                test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
+       tx_data->force_default_pri_flg =
+               test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+
        tx_data->tx_status_block_id = params->fw_sb_id;
        tx_data->tx_sb_index_number = params->sb_cq_index;
        tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5326,6 +5332,17 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
        case BNX2X_F_STATE_STARTED:
                if (cmd == BNX2X_F_CMD_STOP)
                        next_state = BNX2X_F_STATE_INITIALIZED;
+               /* afex ramrods can be sent only in started mode, and only
+                * if not pending for function_stop ramrod completion
+                * for these events - next state remained STARTED.
+                */
+               else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
+                        (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+                       next_state = BNX2X_F_STATE_STARTED;
+
+               else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
+                        (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+                       next_state = BNX2X_F_STATE_STARTED;
                else if (cmd == BNX2X_F_CMD_TX_STOP)
                        next_state = BNX2X_F_STATE_TX_STOPPED;
 
@@ -5613,6 +5630,83 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
 }
 
/* bnx2x_func_send_afex_update - post an AFEX FUNCTION_UPDATE ramrod.
 *
 * Fills the function_update ramrod data in the dedicated afex_rdata
 * buffer (separate from the regular rdata, so it cannot collide with
 * other function ramrods in flight) with the vif id, default VLAN and
 * allowed priorities taken from params->params.afex_update, then posts
 * the ramrod on the slow-path queue via its DMA mapping.
 *
 * Returns the bnx2x_sp_post() result (0 on success, negative on error).
 */
static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
					 struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->afex_rdata;
	dma_addr_t data_mapping = o->afex_rdata_mapping;
	struct bnx2x_func_afex_update_params *afex_update_params =
		&params->params.afex_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters; each value is
	 * accompanied by its change flag so the FW knows it is valid.
	 */
	rdata->vif_id_change_flg = 1;
	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
	rdata->afex_default_vlan_change_flg = 1;
	rdata->afex_default_vlan =
		cpu_to_le16(afex_update_params->afex_default_vlan);
	rdata->allowed_priorities_change_flg = 1;
	rdata->allowed_priorities = afex_update_params->allowed_priorities;

	/*  No need for an explicit memory barrier here as long we would
	 *  need to ensure the ordering of writing to the SPQ element
	 *  and updating of the SPQ producer which involves a memory
	 *  read and we will have to put a full memory barrier there
	 *  (inside bnx2x_sp_post()).
	 */
	DP(BNX2X_MSG_SP,
	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
	   rdata->vif_id,
	   rdata->afex_default_vlan, rdata->allowed_priorities);

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
+
/* bnx2x_func_send_afex_viflists - post an AFEX VIF_LISTS ramrod.
 *
 * Unlike most ramrods, the vif-list ramrod data fits in a single u64 and
 * is passed directly inside the SPQ element instead of through a DMA
 * mapping - hence the U64_HI/U64_LO of *p_rdata below.  The sub-command
 * is also copied into the 'echo' field so the completion handler can
 * tell a LIST_GET apart from a LIST_SET.
 *
 * Returns the bnx2x_sp_post() result (0 on success, negative on error).
 */
static
inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
					 struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct bnx2x_func_afex_viflists_params *afex_viflist_params =
		&params->params.afex_viflists;
	u64 *p_rdata = (u64 *)rdata;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index = afex_viflist_params->vif_list_index;
	rdata->func_bit_map = afex_viflist_params->func_bit_map;
	rdata->afex_vif_list_command =
		afex_viflist_params->afex_vif_list_command;
	rdata->func_to_clear = afex_viflist_params->func_to_clear;

	/* send in echo type of sub command */
	rdata->echo = afex_viflist_params->afex_vif_list_command;

	/*  No need for an explicit memory barrier here as long we would
	 *  need to ensure the ordering of writing to the SPQ element
	 *  and updating of the SPQ producer which involves a memory
	 *  read and we will have to put a full memory barrier there
	 *  (inside bnx2x_sp_post()).
	 */

	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
	   rdata->afex_vif_list_command, rdata->vif_list_index,
	   rdata->func_bit_map, rdata->func_to_clear);

	/* this ramrod sends data directly and not through DMA mapping */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     U64_HI(*p_rdata), U64_LO(*p_rdata),
			     NONE_CONNECTION_TYPE);
}
+
 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
                                       struct bnx2x_func_state_params *params)
 {
@@ -5664,6 +5758,10 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
                return bnx2x_func_send_stop(bp, params);
        case BNX2X_F_CMD_HW_RESET:
                return bnx2x_func_hw_reset(bp, params);
+       case BNX2X_F_CMD_AFEX_UPDATE:
+               return bnx2x_func_send_afex_update(bp, params);
+       case BNX2X_F_CMD_AFEX_VIFLISTS:
+               return bnx2x_func_send_afex_viflists(bp, params);
        case BNX2X_F_CMD_TX_STOP:
                return bnx2x_func_send_tx_stop(bp, params);
        case BNX2X_F_CMD_TX_START:
@@ -5677,6 +5775,7 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
 void bnx2x_init_func_obj(struct bnx2x *bp,
                         struct bnx2x_func_sp_obj *obj,
                         void *rdata, dma_addr_t rdata_mapping,
+                        void *afex_rdata, dma_addr_t afex_rdata_mapping,
                         struct bnx2x_func_sp_drv_ops *drv_iface)
 {
        memset(obj, 0, sizeof(*obj));
@@ -5685,7 +5784,8 @@ void bnx2x_init_func_obj(struct bnx2x *bp,
 
        obj->rdata = rdata;
        obj->rdata_mapping = rdata_mapping;
-
+       obj->afex_rdata = afex_rdata;
+       obj->afex_rdata_mapping = afex_rdata_mapping;
        obj->send_cmd = bnx2x_func_send_cmd;
        obj->check_transition = bnx2x_func_chk_transition;
        obj->complete_cmd = bnx2x_func_comp_cmd;
index dee2f372a974a3820318ed45c099a9b69388a0d3..efd80bdd0dfe2c906697edf7a821eadfd87b80b2 100644 (file)
@@ -62,6 +62,8 @@ enum {
        BNX2X_FILTER_MCAST_PENDING,
        BNX2X_FILTER_MCAST_SCHED,
        BNX2X_FILTER_RSS_CONF_PENDING,
+       BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
+       BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
 };
 
 struct bnx2x_raw_obj {
@@ -432,6 +434,8 @@ enum {
        BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
 };
 
+void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+                         bool add, unsigned char *dev_addr, int index);
 
 /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 
@@ -798,7 +802,8 @@ enum {
        BNX2X_Q_FLG_TX_SWITCH,
        BNX2X_Q_FLG_TX_SEC,
        BNX2X_Q_FLG_ANTI_SPOOF,
-       BNX2X_Q_FLG_SILENT_VLAN_REM
+       BNX2X_Q_FLG_SILENT_VLAN_REM,
+       BNX2X_Q_FLG_FORCE_DEFAULT_PRI
 };
 
 /* Queue type options: queue type may be a compination of below. */
@@ -960,6 +965,11 @@ struct bnx2x_queue_state_params {
        } params;
 };
 
+struct bnx2x_viflist_params {
+       u8 echo_res;
+       u8 func_bit_map_res;
+};
+
 struct bnx2x_queue_sp_obj {
        u32             cids[BNX2X_MULTI_TX_COS];
        u8              cl_id;
@@ -1042,6 +1052,8 @@ enum bnx2x_func_cmd {
        BNX2X_F_CMD_START,
        BNX2X_F_CMD_STOP,
        BNX2X_F_CMD_HW_RESET,
+       BNX2X_F_CMD_AFEX_UPDATE,
+       BNX2X_F_CMD_AFEX_VIFLISTS,
        BNX2X_F_CMD_TX_STOP,
        BNX2X_F_CMD_TX_START,
        BNX2X_F_CMD_MAX,
@@ -1086,6 +1098,18 @@ struct bnx2x_func_start_params {
        u8 network_cos_mode;
 };
 
+struct bnx2x_func_afex_update_params {
+       u16 vif_id;
+       u16 afex_default_vlan;
+       u8 allowed_priorities;
+};
+
+struct bnx2x_func_afex_viflists_params {
+       u16 vif_list_index;
+       u8 func_bit_map;
+       u8 afex_vif_list_command;
+       u8 func_to_clear;
+};
 struct bnx2x_func_tx_start_params {
        struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
        u8 dcb_enabled;
@@ -1107,6 +1131,8 @@ struct bnx2x_func_state_params {
                struct bnx2x_func_hw_init_params hw_init;
                struct bnx2x_func_hw_reset_params hw_reset;
                struct bnx2x_func_start_params start;
+               struct bnx2x_func_afex_update_params afex_update;
+               struct bnx2x_func_afex_viflists_params afex_viflists;
                struct bnx2x_func_tx_start_params tx_start;
        } params;
 };
@@ -1151,6 +1177,13 @@ struct bnx2x_func_sp_obj {
        void                    *rdata;
        dma_addr_t              rdata_mapping;
 
+       /* Buffer to use as a afex ramrod data and its mapping.
+        * This can't be same rdata as above because afex ramrod requests
+        * can arrive to the object in parallel to other ramrod requests.
+        */
+       void                    *afex_rdata;
+       dma_addr_t              afex_rdata_mapping;
+
        /* this mutex validates that when pending flag is taken, the next
         * ramrod to be sent will be the one set the pending bit
         */
@@ -1194,6 +1227,7 @@ union bnx2x_qable_obj {
 void bnx2x_init_func_obj(struct bnx2x *bp,
                         struct bnx2x_func_sp_obj *obj,
                         void *rdata, dma_addr_t rdata_mapping,
+                        void *afex_rdata, dma_addr_t afex_rdata_mapping,
                         struct bnx2x_func_sp_drv_ops *drv_iface);
 
 int bnx2x_func_state_change(struct bnx2x *bp,
index e1c9310fb07cad5cad61518a08c0b9974f8e8eff..7366e92c3fa711cc8aab5d57998b4eaaedccc1a6 100644 (file)
@@ -1561,3 +1561,274 @@ void bnx2x_save_statistics(struct bnx2x *bp)
                UPDATE_FW_STAT_OLD(mac_discard);
        }
 }
+
+/**
+ * bnx2x_afex_collect_stats - aggregate driver statistics for an afex request
+ *
+ * @bp:			driver handle
+ * @void_afex_stats:	buffer to fill (holds a struct afex_stats)
+ * @stats_type:		requested stats type (e.g. VICSTATST_UIF_INDEX)
+ *
+ * Sums per-ethernet-queue counters, then (when FCoE is active) the
+ * separately collected FCoE counters, into @void_afex_stats.  Port-level
+ * discards are added only for the PMF when UIF stats are requested,
+ * since the MCP accumulates them before sending to the switch.
+ */
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+                             u32 stats_type)
+{
+       int i;
+       struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
+       struct bnx2x_eth_stats *estats = &bp->eth_stats;
+       struct per_queue_stats *fcoe_q_stats =
+               &bp->fw_stats_data->queue_stats[FCOE_IDX];
+
+       struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+               &fcoe_q_stats->tstorm_queue_statistics;
+
+       struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
+               &fcoe_q_stats->ustorm_queue_statistics;
+
+       struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+               &fcoe_q_stats->xstorm_queue_statistics;
+
+       struct fcoe_statistics_params *fw_fcoe_stat =
+               &bp->fw_stats_data->fcoe;
+
+       memset(afex_stats, 0, sizeof(struct afex_stats));
+
+       for_each_eth_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+               struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+               ADD_64(afex_stats->rx_unicast_bytes_hi,
+                      qstats->total_unicast_bytes_received_hi,
+                      afex_stats->rx_unicast_bytes_lo,
+                      qstats->total_unicast_bytes_received_lo);
+
+               ADD_64(afex_stats->rx_broadcast_bytes_hi,
+                      qstats->total_broadcast_bytes_received_hi,
+                      afex_stats->rx_broadcast_bytes_lo,
+                      qstats->total_broadcast_bytes_received_lo);
+
+               ADD_64(afex_stats->rx_multicast_bytes_hi,
+                      qstats->total_multicast_bytes_received_hi,
+                      afex_stats->rx_multicast_bytes_lo,
+                      qstats->total_multicast_bytes_received_lo);
+
+               ADD_64(afex_stats->rx_unicast_frames_hi,
+                      qstats->total_unicast_packets_received_hi,
+                      afex_stats->rx_unicast_frames_lo,
+                      qstats->total_unicast_packets_received_lo);
+
+               ADD_64(afex_stats->rx_broadcast_frames_hi,
+                      qstats->total_broadcast_packets_received_hi,
+                      afex_stats->rx_broadcast_frames_lo,
+                      qstats->total_broadcast_packets_received_lo);
+
+               ADD_64(afex_stats->rx_multicast_frames_hi,
+                      qstats->total_multicast_packets_received_hi,
+                      afex_stats->rx_multicast_frames_lo,
+                      qstats->total_multicast_packets_received_lo);
+
+               /* sum to rx_frames_discarded all discarded
+                * packets due to size, ttl0 and checksum
+                */
+               ADD_64(afex_stats->rx_frames_discarded_hi,
+                      qstats->total_packets_received_checksum_discarded_hi,
+                      afex_stats->rx_frames_discarded_lo,
+                      qstats->total_packets_received_checksum_discarded_lo);
+
+               ADD_64(afex_stats->rx_frames_discarded_hi,
+                      qstats->total_packets_received_ttl0_discarded_hi,
+                      afex_stats->rx_frames_discarded_lo,
+                      qstats->total_packets_received_ttl0_discarded_lo);
+
+               ADD_64(afex_stats->rx_frames_discarded_hi,
+                      qstats->etherstatsoverrsizepkts_hi,
+                      afex_stats->rx_frames_discarded_lo,
+                      qstats->etherstatsoverrsizepkts_lo);
+
+               ADD_64(afex_stats->rx_frames_dropped_hi,
+                      qstats->no_buff_discard_hi,
+                      afex_stats->rx_frames_dropped_lo,
+                      qstats->no_buff_discard_lo);
+
+               ADD_64(afex_stats->tx_unicast_bytes_hi,
+                      qstats->total_unicast_bytes_transmitted_hi,
+                      afex_stats->tx_unicast_bytes_lo,
+                      qstats->total_unicast_bytes_transmitted_lo);
+
+               ADD_64(afex_stats->tx_broadcast_bytes_hi,
+                      qstats->total_broadcast_bytes_transmitted_hi,
+                      afex_stats->tx_broadcast_bytes_lo,
+                      qstats->total_broadcast_bytes_transmitted_lo);
+
+               ADD_64(afex_stats->tx_multicast_bytes_hi,
+                      qstats->total_multicast_bytes_transmitted_hi,
+                      afex_stats->tx_multicast_bytes_lo,
+                      qstats->total_multicast_bytes_transmitted_lo);
+
+               ADD_64(afex_stats->tx_unicast_frames_hi,
+                      qstats->total_unicast_packets_transmitted_hi,
+                      afex_stats->tx_unicast_frames_lo,
+                      qstats->total_unicast_packets_transmitted_lo);
+
+               ADD_64(afex_stats->tx_broadcast_frames_hi,
+                      qstats->total_broadcast_packets_transmitted_hi,
+                      afex_stats->tx_broadcast_frames_lo,
+                      qstats->total_broadcast_packets_transmitted_lo);
+
+               ADD_64(afex_stats->tx_multicast_frames_hi,
+                      qstats->total_multicast_packets_transmitted_hi,
+                      afex_stats->tx_multicast_frames_lo,
+                      qstats->total_multicast_packets_transmitted_lo);
+
+               ADD_64(afex_stats->tx_frames_dropped_hi,
+                      qstats->total_transmitted_dropped_packets_error_hi,
+                      afex_stats->tx_frames_dropped_lo,
+                      qstats->total_transmitted_dropped_packets_error_lo);
+       }
+
+       /* now add FCoE statistics which are collected separately
+        * (both offloaded and non offloaded)
+        */
+       if (!NO_FCOE(bp)) {
+               ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+                         LE32_0,
+                         afex_stats->rx_unicast_bytes_lo,
+                         fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+               ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+                         fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+                         afex_stats->rx_unicast_bytes_lo,
+                         fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+               ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
+                         fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+                         afex_stats->rx_broadcast_bytes_lo,
+                         fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+               ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
+                         fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+                         afex_stats->rx_multicast_bytes_lo,
+                         fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+               ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+                         LE32_0,
+                         afex_stats->rx_unicast_frames_lo,
+                         fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+               ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+                         LE32_0,
+                         afex_stats->rx_unicast_frames_lo,
+                         fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+               ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
+                         LE32_0,
+                         afex_stats->rx_broadcast_frames_lo,
+                         fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+               /* use the mcast packet counter here; rcv_ucast_pkts was
+                * already accumulated into rx_unicast_frames above
+                */
+               ADD_64_LE(afex_stats->rx_multicast_frames_hi,
+                         LE32_0,
+                         afex_stats->rx_multicast_frames_lo,
+                         fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+               ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+                         LE32_0,
+                         afex_stats->rx_frames_discarded_lo,
+                         fcoe_q_tstorm_stats->checksum_discard);
+
+               ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+                         LE32_0,
+                         afex_stats->rx_frames_discarded_lo,
+                         fcoe_q_tstorm_stats->pkts_too_big_discard);
+
+               ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+                         LE32_0,
+                         afex_stats->rx_frames_discarded_lo,
+                         fcoe_q_tstorm_stats->ttl0_discard);
+
+               ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
+                           LE16_0,
+                           afex_stats->rx_frames_dropped_lo,
+                           fcoe_q_tstorm_stats->no_buff_discard);
+
+               ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+                         LE32_0,
+                         afex_stats->rx_frames_dropped_lo,
+                         fcoe_q_ustorm_stats->ucast_no_buff_pkts);
+
+               ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+                         LE32_0,
+                         afex_stats->rx_frames_dropped_lo,
+                         fcoe_q_ustorm_stats->mcast_no_buff_pkts);
+
+               ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+                         LE32_0,
+                         afex_stats->rx_frames_dropped_lo,
+                         fcoe_q_ustorm_stats->bcast_no_buff_pkts);
+
+               ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+                         LE32_0,
+                         afex_stats->rx_frames_dropped_lo,
+                         fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
+
+               ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+                         LE32_0,
+                         afex_stats->rx_frames_dropped_lo,
+                         fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
+
+               ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+                         LE32_0,
+                         afex_stats->tx_unicast_bytes_lo,
+                         fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+               ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+                         fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+                         afex_stats->tx_unicast_bytes_lo,
+                         fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+               ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
+                         fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+                         afex_stats->tx_broadcast_bytes_lo,
+                         fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+               ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
+                         fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+                         afex_stats->tx_multicast_bytes_lo,
+                         fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+               ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+                         LE32_0,
+                         afex_stats->tx_unicast_frames_lo,
+                         fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+               ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+                         LE32_0,
+                         afex_stats->tx_unicast_frames_lo,
+                         fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+               ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
+                         LE32_0,
+                         afex_stats->tx_broadcast_frames_lo,
+                         fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+               ADD_64_LE(afex_stats->tx_multicast_frames_hi,
+                         LE32_0,
+                         afex_stats->tx_multicast_frames_lo,
+                         fcoe_q_xstorm_stats->mcast_pkts_sent);
+
+               ADD_64_LE(afex_stats->tx_frames_dropped_hi,
+                         LE32_0,
+                         afex_stats->tx_frames_dropped_lo,
+                         fcoe_q_xstorm_stats->error_drop_pkts);
+       }
+
+       /* if port stats are requested, add them to the PMF
+        * stats, as anyway they will be accumulated by the
+        * MCP before sent to the switch
+        */
+       if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
+               ADD_64(afex_stats->rx_frames_dropped_hi,
+                      0,
+                      afex_stats->rx_frames_dropped_lo,
+                      estats->mac_filter_discard);
+               ADD_64(afex_stats->rx_frames_dropped_hi,
+                      0,
+                      afex_stats->rx_frames_dropped_lo,
+                      estats->brb_truncate_discard);
+               ADD_64(afex_stats->rx_frames_discarded_hi,
+                      0,
+                      afex_stats->rx_frames_discarded_lo,
+                      estats->mac_discard);
+       }
+}
index 2b46e1eb7fd1dab6a5c42928df90f90d91bd3e03..93e689fdfeda9b8fa78d617499c18aea53573c4d 100644 (file)
@@ -338,6 +338,18 @@ struct bnx2x_fw_port_stats_old {
                s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
        } while (0)
 
+/* Little-endian zero constants, used as the "high" argument of the
+ * ADD_64_LE* macros when a firmware counter has no high half; the
+ * __force cast keeps the endianness annotation consistent for a
+ * plain zero.
+ */
+#define LE32_0 ((__force __le32) 0)
+#define LE16_0 ((__force __le16) 0)
+
+/* Add a little-endian hi/lo counter pair into a CPU-endian 64-bit
+ * accumulator (same hi/lo carry semantics as ADD_64).
+ */
+#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
+               ADD_64(s_hi, le32_to_cpu(a_hi_le), \
+                      s_lo, le32_to_cpu(a_lo_le))
+
+/* Same as ADD_64_LE, for 16-bit little-endian counters. */
+#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
+               ADD_64(s_hi, le16_to_cpu(a_hi_le), \
+                      s_lo, le16_to_cpu(a_lo_le))
+
 /* difference = minuend - subtrahend */
 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
        do { \
@@ -529,4 +541,7 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
  * @bp:                driver handle
  */
 void bnx2x_save_statistics(struct bnx2x *bp);
+
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+                             u32 stats_type);
 #endif /* BNX2X_STATS_H */