www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
qed: Introduce iWARP personality
author: Kalderon, Michal <Michal.Kalderon@cavium.com>
Sun, 2 Jul 2017 07:29:21 +0000 (10:29 +0300)
committer: Chuck Anderson <chuck.anderson@oracle.com>
Tue, 19 Sep 2017 05:32:36 +0000 (22:32 -0700)
Orabug: 26783820

The iWARP personality introduces the need to differentiate, in several
places in the code, whether we are RoCE, iWARP, or either. This leads
to the introduction of new macros for querying the personality.

Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
[ Upstream commit c851a9dc4359c6b19722de568e9f543c1c23481c ]
Signed-off-by: Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
include/linux/qed/common_hsi.h

index 0944fb5879b6415afa8b15884af4d57b1e036ccf..4db2adbf2d6d6c3f8970fba62fb2a23225d0dc2e 100644 (file)
@@ -211,14 +211,16 @@ struct qed_tunn_update_params {
 
 /* The PCI personality is not quite synonymous to protocol ID:
  * 1. All personalities need CORE connections
- * 2. The Ethernet personality may support also the RoCE protocol
+ * 2. The Ethernet personality may support also the RoCE/iWARP protocol
  */
 enum qed_pci_personality {
        QED_PCI_ETH,
        QED_PCI_FCOE,
        QED_PCI_ISCSI,
        QED_PCI_ETH_ROCE,
-       QED_PCI_DEFAULT /* default in shmem */
+       QED_PCI_ETH_IWARP,
+       QED_PCI_ETH_RDMA,
+       QED_PCI_DEFAULT, /* default in shmem */
 };
 
 /* All VFs are symmetric, all counters are PF + all VFs */
@@ -278,6 +280,7 @@ enum qed_dev_cap {
        QED_DEV_CAP_FCOE,
        QED_DEV_CAP_ISCSI,
        QED_DEV_CAP_ROCE,
+       QED_DEV_CAP_IWARP,
 };
 
 enum qed_wol_support {
@@ -287,7 +290,24 @@ enum qed_wol_support {
 
 struct qed_hw_info {
        /* PCI personality */
-       enum qed_pci_personality        personality;
+       enum qed_pci_personality personality;
+#define QED_IS_RDMA_PERSONALITY(dev)                       \
+       ((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||  \
+        (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+        (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_ROCE_PERSONALITY(dev)                      \
+       ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
+        (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_IWARP_PERSONALITY(dev)                      \
+       ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+        (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_L2_PERSONALITY(dev)                   \
+       ((dev)->hw_info.personality == QED_PCI_ETH || \
+        QED_IS_RDMA_PERSONALITY(dev))
+#define QED_IS_FCOE_PERSONALITY(dev) \
+       ((dev)->hw_info.personality == QED_PCI_FCOE)
+#define QED_IS_ISCSI_PERSONALITY(dev) \
+       ((dev)->hw_info.personality == QED_PCI_ISCSI)
 
        /* Resource Allocation scheme results */
        u32                             resc_start[QED_MAX_RESC];
index f39ab40ae0cef38ea5fcc7854890cdc986596291..ddfbc478d11a07db1d81865ac2aed1340993257d 100644 (file)
@@ -978,7 +978,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
        u32 lines, line, sz_left, lines_to_skip = 0;
 
        /* Special handling for RoCE that supports dynamic allocation */
-       if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
+       if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
            ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
                return 0;
 
@@ -1778,7 +1778,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
                tm_offset += tm_iids.pf_tids[i];
        }
 
-       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+       if (QED_IS_RDMA_PERSONALITY(p_hwfn))
                active_seg_mask = 0;
 
        STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
@@ -2285,7 +2285,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
                       last_cid_allocated - 1);
 
                if (!p_hwfn->b_rdma_enabled_in_prs) {
-                       /* Enable RoCE search */
+                       /* Enable RDMA search */
                        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
                        p_hwfn->b_rdma_enabled_in_prs = true;
                }
index 0ef54748d5410a3fb3c6c9e15e3a0cba0e19286b..29a5b08b6c1c0eb19516a6ef4d82551d242390d3 100644 (file)
@@ -911,7 +911,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
 
                /* EQ */
                n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
-               if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+               if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
                        num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
                                                               PROTOCOLID_ROCE,
                                                               NULL) * 2;
@@ -2032,7 +2032,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
        qed_int_get_num_sbs(p_hwfn, &sb_cnt);
 
        if (IS_ENABLED(CONFIG_QED_RDMA) &&
-           p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+           QED_IS_RDMA_PERSONALITY(p_hwfn)) {
                /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
                 * the status blocks equally between L2 / RoCE but with
                 * consideration as to how many l2 queues / cnqs we have.
@@ -2043,9 +2043,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 
                non_l2_sbs = feat_num[QED_RDMA_CNQ];
        }
-
-       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
-           p_hwfn->hw_info.personality == QED_PCI_ETH) {
+       if (QED_IS_L2_PERSONALITY(p_hwfn)) {
                /* Start by allocating VF queues, then PF's */
                feat_num[QED_VF_L2_QUE] = min_t(u32,
                                                RESC_NUM(p_hwfn, QED_L2_QUEUE),
@@ -2058,12 +2056,12 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
                                                         QED_VF_L2_QUE));
        }
 
-       if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+       if (QED_IS_FCOE_PERSONALITY(p_hwfn))
                feat_num[QED_FCOE_CQ] =  min_t(u32, sb_cnt.cnt,
                                               RESC_NUM(p_hwfn,
                                                        QED_CMDQS_CQS));
 
-       if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+       if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
                feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
                                               RESC_NUM(p_hwfn,
                                                        QED_CMDQS_CQS));
index 499efa9b40b831f0aa0f7a27170e4a6244603a04..2285f7200e7cdacf58e4c064bfb85ddd68fb024e 100644 (file)
@@ -79,8 +79,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn)
        unsigned long **pp_qids;
        u32 i;
 
-       if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
-           p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+       if (!QED_IS_L2_PERSONALITY(p_hwfn))
                return 0;
 
        p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
index 09b998d4bbaa0da871d5d2e061762a840bc913e8..abefe912bced37256d470cd2b8bde3fa3f97b187 100644 (file)
@@ -1421,7 +1421,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
        if (rc)
                goto out;
 
-       if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+       if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
                qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
 
        qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
index ddaa5bd7daa0cf8f7cf95e9631fd37e4084a083a..7c6e47e820a8e5dcf8d9a649e6b3a68a85a4eb13 100644 (file)
@@ -237,6 +237,8 @@ err0:
 int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info)
 {
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_hw_info *hw_info = &p_hwfn->hw_info;
        struct qed_tunnel_info *tun = &cdev->tunnel;
        struct qed_ptt  *ptt;
 
@@ -260,11 +262,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
        dev_info->pci_mem_start = cdev->pci_params.mem_start;
        dev_info->pci_mem_end = cdev->pci_params.mem_end;
        dev_info->pci_irq = cdev->pci_params.irq;
-       dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
-                                   QED_PCI_ETH_ROCE);
+       dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
        dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
        dev_info->dev_type = cdev->type;
-       ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+       ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
 
        if (IS_PF(cdev)) {
                dev_info->fw_major = FW_MAJOR_VERSION;
@@ -274,8 +275,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
                dev_info->mf_mode = cdev->mf_mode;
                dev_info->tx_switching = true;
 
-               if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
-                   QED_WOL_SUPPORT_PME)
+               if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
                        dev_info->wol_support = true;
 
                dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
@@ -304,7 +304,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
                                    &dev_info->mfw_rev, NULL);
        }
 
-       dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
+       dev_info->mtu = hw_info->mtu;
 
        return 0;
 }
@@ -790,7 +790,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
                                       cdev->num_hwfns;
 
        if (!IS_ENABLED(CONFIG_QED_RDMA) ||
-           QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
+           !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
                return 0;
 
        for_each_hwfn(cdev, i)
@@ -932,8 +932,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
        /* In case we might support RDMA, don't allow qede to be greedy
         * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
         */
-       if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
-           QED_PCI_ETH_ROCE) {
+       if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
                u16 *num_cons;
 
                num_cons = &params->eth_pf_params.num_cons;
index a567cbf8c5b465edf3aff52d0454886e3d0239c7..885ae1379b5ae4c2febd7a2a965ded4e2e8f7981 100644 (file)
@@ -778,7 +778,7 @@ enum protocol_type {
        PROTOCOLID_ROCE,
        PROTOCOLID_CORE,
        PROTOCOLID_ETH,
-       PROTOCOLID_RESERVED4,
+       PROTOCOLID_IWARP,
        PROTOCOLID_RESERVED5,
        PROTOCOLID_PREROCE,
        PROTOCOLID_COMMON,