#define UNDEF_IRO 0x80000000
 
+/* used for defining the number of FCoE tasks supported per PF */
+#define MAX_FCOE_FUNCS_PER_ENGINE 2
+#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096
+
 #endif /* BNX2X_FW_DEFS_H */
 
                (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
                BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
 
+       /* Calculate the maximum number of allowed FCoE tasks */
+       bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
+       if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp))
+               bp->cnic_eth_dev.max_fcoe_exchanges /=
+                                               MAX_FCOE_FUNCS_PER_ENGINE;
+
        /* Read the WWN: */
        if (!IS_MF(bp)) {
                /* Port info */
 
 
        if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
                cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
-       if (CNIC_SUPPORTS_FCOE(cp))
+       if (CNIC_SUPPORTS_FCOE(cp)) {
                cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+               cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
+       }
 
        if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
                cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
 
        u32             max_fcoe_conn;
        u32             max_rdma_conn;
        u32             fcoe_init_cid;
+       u32             max_fcoe_exchanges;
        u32             fcoe_wwn_port_name_hi;
        u32             fcoe_wwn_port_name_lo;
        u32             fcoe_wwn_node_name_hi;
        int             max_fcoe_conn;
        int             max_rdma_conn;
 
+       int             max_fcoe_exchanges;
+
        union drv_info_to_mcp   *stats_addr;
        struct fcoe_capabilities        *fcoe_cap;
 
 
 
 #define BNX2FC_MAX_NPIV                256
 
-#define BNX2FC_MAX_OUTSTANDING_CMNDS   2048
-#define BNX2FC_CAN_QUEUE               BNX2FC_MAX_OUTSTANDING_CMNDS
-#define BNX2FC_ELSTM_XIDS              BNX2FC_CAN_QUEUE
 #define BNX2FC_MIN_PAYLOAD             256
 #define BNX2FC_MAX_PAYLOAD             2048
 #define BNX2FC_MFS                     \
 #define BNX2FC_CONFQ_WQE_SIZE          (sizeof(struct fcoe_confqe))
 #define BNX2FC_5771X_DB_PAGE_SIZE      128
 
-#define BNX2FC_MAX_TASKS               \
-                            (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS)
 #define BNX2FC_TASK_SIZE               128
 #define        BNX2FC_TASKS_PER_PAGE           (PAGE_SIZE/BNX2FC_TASK_SIZE)
-#define BNX2FC_TASK_CTX_ARR_SZ         (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
 
 #define BNX2FC_MAX_ROWS_IN_HASH_TBL    8
 #define BNX2FC_HASH_TBL_CHUNK_SIZE     (16 * 1024)
 #define BNX2FC_WRITE                   (1 << 0)
 
 #define BNX2FC_MIN_XID                 0
-#define BNX2FC_MAX_XID                 \
-                       (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
 #define FCOE_MAX_NUM_XIDS              0x2000
-#define FCOE_MIN_XID                   (BNX2FC_MAX_XID + 1)
-#define FCOE_MAX_XID                   (FCOE_MIN_XID + FCOE_MAX_NUM_XIDS - 1)
-#define FCOE_XIDS_PER_CPU              (FCOE_MIN_XID + (512 * nr_cpu_ids) - 1)
+#define FCOE_MAX_XID_OFFSET            (FCOE_MAX_NUM_XIDS - 1)
+#define FCOE_XIDS_PER_CPU_OFFSET       ((512 * nr_cpu_ids) - 1)
 #define BNX2FC_MAX_LUN                 0xFFFF
 #define BNX2FC_MAX_FCP_TGT             256
 #define BNX2FC_MAX_CMD_LEN             16
                #define BNX2FC_FLAG_FW_INIT_DONE        0
                #define BNX2FC_FLAG_DESTROY_CMPL        1
        u32 next_conn_id;
+
+       /* xid resources */
+       u16 max_xid;
+       u32 max_tasks;
+       u32 max_outstanding_cmds;
+       u32 elstm_xids;
+
        struct fcoe_task_ctx_entry **task_ctx;
        dma_addr_t *task_ctx_dma;
        struct regpair *task_ctx_bd_tbl;
 void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
 int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
 void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
-struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
-                                               u16 min_xid, u16 max_xid);
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba);
 void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
 void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
 char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
 
 static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
 static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
 static int bnx2fc_lport_config(struct fc_lport *lport);
-static int bnx2fc_em_config(struct fc_lport *lport);
+static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba);
 static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
 static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
 static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
        return 0;
 }
 
-static int bnx2fc_em_config(struct fc_lport *lport)
+static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba)
 {
-       int max_xid;
+       int fcoe_min_xid, fcoe_max_xid;
 
+       fcoe_min_xid = hba->max_xid + 1;
        if (nr_cpu_ids <= 2)
-               max_xid = FCOE_XIDS_PER_CPU;
+               fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET;
        else
-               max_xid = FCOE_MAX_XID;
-       if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
-                               max_xid, NULL)) {
+               fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET;
+       if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid,
+                              fcoe_max_xid, NULL)) {
                printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
                return -ENOMEM;
        }
        mutex_init(&hba->hba_mutex);
 
        hba->cnic = cnic;
+
+       hba->max_tasks = cnic->max_fcoe_exchanges;
+       hba->elstm_xids = (hba->max_tasks / 2);
+       hba->max_outstanding_cmds = hba->elstm_xids;
+       hba->max_xid = (hba->max_tasks - 1);
+
        rc = bnx2fc_bind_pcidev(hba);
        if (rc) {
                printk(KERN_ERR PFX "create_adapter:  bind error\n");
 
        hba->num_ofld_sess = 0;
 
-       hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
-                                               BNX2FC_MAX_XID);
+       hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba);
        if (!hba->cmd_mgr) {
                printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
                goto cmgr_err;
                                        FCOE_IOS_PER_CONNECTION_SHIFT;
        fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
                                        FCOE_LOGINS_PER_PORT_SHIFT;
-       fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS <<
+       fcoe_cap->capability2 = hba->max_outstanding_cmds <<
                                        FCOE_NUMBER_OF_EXCHANGES_SHIFT;
        fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
                                        FCOE_NPIV_WWN_PER_PORT_SHIFT;
        fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
                                        FCOE_TARGETS_SUPPORTED_SHIFT;
-       fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS <<
+       fcoe_cap->capability3 |= hba->max_outstanding_cmds <<
                                        FCOE_OUTSTANDING_COMMANDS_SHIFT;
        fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
 
        struct Scsi_Host        *shost;
        struct fc_vport         *vport = dev_to_vport(parent);
        struct bnx2fc_lport     *blport;
-       struct bnx2fc_hba       *hba;
+       struct bnx2fc_hba       *hba = interface->hba;
        int                     rc = 0;
 
        blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
        }
 
        /* Allocate Scsi_Host structure */
+       bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds;
        if (!npiv)
                lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
        else
 
        /* Allocate exchange manager */
        if (!npiv)
-               rc = bnx2fc_em_config(lport);
+               rc = bnx2fc_em_config(lport, hba);
        else {
                shost = vport_to_shost(vport);
                n_port = shost_priv(shost);
 
        bnx2fc_interface_get(interface);
 
-       hba = interface->hba;
        spin_lock_bh(&hba->hba_lock);
        blport->lport = lport;
        list_add_tail(&blport->list, &hba->vports);
        .change_queue_type      = fc_change_queue_type,
        .this_id                = -1,
        .cmd_per_lun            = 3,
-       .can_queue              = BNX2FC_CAN_QUEUE,
        .use_clustering         = ENABLE_CLUSTERING,
        .sg_tablesize           = BNX2FC_MAX_BDS_PER_CMD,
        .max_sectors            = 1024,
 
        fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 
-       fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
+       fcoe_init1.num_tasks = hba->max_tasks;
        fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
        fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
        fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 
 
-               if (xid > BNX2FC_MAX_XID) {
+               if (xid > hba->max_xid) {
                        BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
                                   xid);
                        goto ret_err_rqe;
                BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 
-               if (xid > BNX2FC_MAX_XID) {
+               if (xid > hba->max_xid) {
                        BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
                        goto ret_warn_rqe;
                }
 
        spin_lock_bh(&tgt->tgt_lock);
        xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
-       if (xid >= BNX2FC_MAX_TASKS) {
+       if (xid >= hba->max_tasks) {
                printk(KERN_ERR PFX "ERROR:xid out of range\n");
                spin_unlock_bh(&tgt->tgt_lock);
                return;
        int rc = 0;
        struct regpair *task_ctx_bdt;
        dma_addr_t addr;
+       int task_ctx_arr_sz;
        int i;
 
        /*
         * Allocate task_ctx which is an array of pointers pointing to
         * a page containing 32 task contexts
         */
-       hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
+       task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
+       hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
                                 GFP_KERNEL);
        if (!hba->task_ctx) {
                printk(KERN_ERR PFX "unable to allocate task context array\n");
        /*
         * Allocate task_ctx_dma which is an array of dma addresses
         */
-       hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
+       hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
                                        sizeof(dma_addr_t)), GFP_KERNEL);
        if (!hba->task_ctx_dma) {
                printk(KERN_ERR PFX "unable to alloc context mapping array\n");
        }
 
        task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
-       for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+       for (i = 0; i < task_ctx_arr_sz; i++) {
 
                hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
                                                      PAGE_SIZE,
        return 0;
 
 out3:
-       for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+       for (i = 0; i < task_ctx_arr_sz; i++) {
                if (hba->task_ctx[i]) {
 
                        dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
 
 void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
 {
+       int task_ctx_arr_sz;
        int i;
 
        if (hba->task_ctx_bd_tbl) {
                hba->task_ctx_bd_tbl = NULL;
        }
 
+       task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
        if (hba->task_ctx) {
-               for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+               for (i = 0; i < task_ctx_arr_sz; i++) {
                        if (hba->task_ctx[i]) {
                                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                                    hba->task_ctx[i],
 
        sc_cmd->scsi_done(sc_cmd);
 }
 
-struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
-                                               u16 min_xid, u16 max_xid)
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
 {
        struct bnx2fc_cmd_mgr *cmgr;
        struct io_bdt *bdt_info;
        int num_ios, num_pri_ios;
        size_t bd_tbl_sz;
        int arr_sz = num_possible_cpus() + 1;
+       u16 min_xid = BNX2FC_MIN_XID;
+       u16 max_xid = hba->max_xid;
 
        if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
                printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
         * of slow path requests.
         */
        xid = BNX2FC_MIN_XID;
-       num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
+       num_pri_ios = num_ios - hba->elstm_xids;
        for (i = 0; i < num_ios; i++) {
                io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
 
        struct bnx2fc_hba *hba = cmgr->hba;
        size_t bd_tbl_sz;
        u16 min_xid = BNX2FC_MIN_XID;
-       u16 max_xid = BNX2FC_MAX_XID;
+       u16 max_xid = hba->max_xid;
        int num_ios;
        int i;