obj-$(CONFIG_BNA) += bna.o
 
-bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
+bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
 bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
 bna-objs += cna_fwimg.o
 
 
        BFA_IOC_DISABLED        = 10,   /*!< IOC is disabled */
        BFA_IOC_FWMISMATCH      = 11,   /*!< IOC f/w different from drivers */
        BFA_IOC_ENABLING        = 12,   /*!< IOC is being enabled */
+       BFA_IOC_HWFAIL          = 13,   /*!< PCI mapping doesn't exist */
 };
 
 /**
        struct bfa_adapter_attr adapter_attr;   /*!< HBA attributes */
        struct bfa_ioc_driver_attr driver_attr; /*!< driver attr    */
        struct bfa_ioc_pci_attr pci_attr;
-       u8                              port_id;        /*!< port number    */
-       u8                              rsvd[7];        /*!< 64bit align    */
+       u8                              port_id;        /*!< port number */
+       u8                              port_mode;      /*!< enum bfa_mode */
+       u8                              cap_bm;         /*!< capability */
+       u8                              port_mode_cfg;  /*!< enum bfa_mode */
+       u8                              rsvd[4];        /*!< 64bit align */
+};
+
+/**
+ * Adapter capability mask definition
+ */
+enum {
+       BFA_CM_HBA      =       0x01,
+       BFA_CM_CNA      =       0x02,
 };
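
A minimal sketch of how a caller might consume the new attribute fields, assuming the enclosing struct is bfa_ioc_attr and using the BFA_CM_*/BFA_MODE_* values introduced elsewhere in this patch (the helper is illustrative, not part of the driver):

	/* Illustrative only: true when the adapter reports CNA capability
	 * and is currently configured for converged (CNA) personality. */
	static bool ioc_attr_is_cna(const struct bfa_ioc_attr *attr)
	{
		return (attr->cap_bm & BFA_CM_CNA) &&
		       (attr->port_mode_cfg == BFA_MODE_CNA);
	}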
 
 /**
        mac_t           mfg_mac;        /*!< mac address */
        u8              num_mac;        /*!< number of mac addresses */
        u8              rsv2;
-       u32     mfg_type;       /*!< card type */
+       u32             card_type;      /*!< card type */
        u8              rsv3[108];
        u8              md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
 };
 #define bfa_asic_id_ct(devid)                  \
        ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
        (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
+#define bfa_asic_id_ctc(devid) (bfa_asic_id_ct(devid))
+
+enum bfa_mode {
+       BFA_MODE_HBA            = 1,
+       BFA_MODE_CNA            = 2,
+       BFA_MODE_NIC            = 3
+};
 
 #endif /* __BFA_DEFS_H__ */
 
 #define __BFA_DEFS_MFG_COMM_H__
 
 #include "cna.h"
+#include "bfa_defs.h"
 
 /**
  * Manufacturing block version
  */
-#define BFA_MFG_VERSION                                2
+#define BFA_MFG_VERSION                                3
 #define BFA_MFG_VERSION_UNINIT                 0xFF
 
 /**
        (type) == BFA_MFG_TYPE_CNA10P1 || \
        bfa_mfg_is_mezz(type)))
 
-#define bfa_mfg_adapter_prop_init_flash(card_type, prop)       \
+#define bfa_mfg_adapter_prop_init_flash_ct(mfgblk, prop)       \
 do {                                                           \
-       switch ((card_type)) {                                  \
-       case BFA_MFG_TYPE_FC8P2:                                \
+       switch ((mfgblk)->card_type) {                          \
        case BFA_MFG_TYPE_JAYHAWK:                              \
        case BFA_MFG_TYPE_ASTRA:                                \
                (prop) = BFI_ADAPTER_SETP(NPORTS, 2) |          \
                        BFI_ADAPTER_SETP(SPEED, 8);             \
                break;                                          \
-       case BFA_MFG_TYPE_FC8P1:                                \
-               (prop) = BFI_ADAPTER_SETP(NPORTS, 1) |          \
-                       BFI_ADAPTER_SETP(SPEED, 8);             \
-               break;                                          \
-       case BFA_MFG_TYPE_FC4P2:                                \
-               (prop) = BFI_ADAPTER_SETP(NPORTS, 2) |          \
-                       BFI_ADAPTER_SETP(SPEED, 4);             \
-               break;                                          \
-       case BFA_MFG_TYPE_FC4P1:                                \
-               (prop) = BFI_ADAPTER_SETP(NPORTS, 1) |          \
-                       BFI_ADAPTER_SETP(SPEED, 4);             \
-               break;                                          \
        case BFA_MFG_TYPE_CNA10P2:                              \
        case BFA_MFG_TYPE_WANCHESE:                             \
        case BFA_MFG_TYPE_LIGHTNING_P0:                         \
 
 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
-static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
                         u32 boot_param);
        IOC_E_ENABLED           = 5,    /*!< f/w enabled                */
        IOC_E_FWRSP_GETATTR     = 6,    /*!< IOC get attribute response */
        IOC_E_DISABLED          = 7,    /*!< f/w disabled               */
-       IOC_E_INITFAILED        = 8,    /*!< failure notice by iocpf sm */
-       IOC_E_PFFAILED          = 9,    /*!< failure notice by iocpf sm */
-       IOC_E_HBFAIL            = 10,   /*!< heartbeat failure          */
-       IOC_E_HWERROR           = 11,   /*!< hardware error interrupt   */
-       IOC_E_TIMEOUT           = 12,   /*!< timeout                    */
+       IOC_E_PFFAILED          = 8,    /*!< failure notice by iocpf sm */
+       IOC_E_HBFAIL            = 9,    /*!< heartbeat failure          */
+       IOC_E_HWERROR           = 10,   /*!< hardware error interrupt   */
+       IOC_E_TIMEOUT           = 11,   /*!< timeout                    */
+       IOC_E_HWFAILED          = 12,   /*!< PCI mapping failure notice */
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
 
 static struct bfa_sm_table ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+       {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
 };
 
 /**
        IOCPF_E_GETATTRFAIL     = 9,    /*!< init fail notice by ioc sm */
        IOCPF_E_SEMLOCKED       = 10,   /*!< h/w semaphore is locked    */
        IOCPF_E_TIMEOUT         = 11,   /*!< f/w response timeout       */
+       IOCPF_E_SEM_ERROR       = 12,   /*!< h/w sem mapping error      */
 };
 
 /**
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_initfail(ioc);
                break;
 
+       case IOC_E_HWFAILED:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+               break;
+
        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;
        case IOC_E_FWRSP_GETATTR:
                del_timer(&ioc->ioc_timer);
                bfa_ioc_check_attr_wwns(ioc);
+               bfa_ioc_hb_monitor(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;
 
                /* fall through */
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_getattrfail(ioc);
                break;
 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
 {
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
-       bfa_ioc_hb_monitor(ioc);
+       bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
 }
 
 static void
                bfa_ioc_hb_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
-               bfa_ioc_fail_notify(ioc);
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 
+               bfa_ioc_fail_notify(ioc);
+
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_fail(ioc);
                break;
                bfa_iocpf_fail(ioc);
                break;
 
+       case IOC_E_HWFAILED:
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+               bfa_ioc_disable_comp(ioc);
+               break;
+
        default:
                bfa_sm_fault(event);
        }
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_initfail(ioc);
                break;
 
-       case IOC_E_INITFAILED:
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+       case IOC_E_HWFAILED:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;
 
        case IOC_E_ENABLE:
        }
 }
 
+static void
+bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
+{
+}
+
+/**
+ * IOC hardware failure.
+ */
+static void
+bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+       switch (event) {
+
+       case IOC_E_ENABLE:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               break;
+
+       case IOC_E_DISABLE:
+               ioc->cbfn->disable_cbfn(ioc->bfa);
+               break;
+
+       case IOC_E_DETACH:
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+               break;
+
+       default:
+               bfa_sm_fault(event);
+       }
+}
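
Throughout this file, the state machines are built from the driver's small FSM helpers. A condensed sketch of those macros (an assumption, simplified from the bfa_fsm_* definitions in the driver's cna.h): the current state is stored as a function pointer, bfa_fsm_set_state() swaps it and runs the new state's entry hook, and bfa_fsm_send_event() simply calls through it.

	typedef void (*bfa_fsm_t)(void *fsm, int event);

	#define bfa_fsm_state_decl(oc, st, otype, etype)		\
		static void oc##_sm_##st(otype *fsm, etype event);	\
		static void oc##_sm_##st##_entry(otype *fsm)

	#define bfa_fsm_set_state(_fsm, _state) do {			\
		(_fsm)->fsm = (bfa_fsm_t)(_state);			\
		_state##_entry(_fsm);	/* run the entry action */	\
	} while (0)

	#define bfa_fsm_send_event(_fsm, _event)			\
		((_fsm)->fsm)((_fsm), (_event))

So bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail) above both records the new state and immediately executes bfa_ioc_sm_hwfail_entry().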
+
 /**
  * IOCPF State Machine
  */
 static void
 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
 {
-       iocpf->retry_count = 0;
+       iocpf->fw_mismatch_notified = false;
        iocpf->auto_recover = bfa_nw_auto_recover;
 }
 
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
-                               iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                }
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_ioc_pf_hwfailed(ioc);
+               break;
+
        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
 {
        /* Call only the first time sm enters fwmismatch state. */
-       if (iocpf->retry_count == 0)
+       if (!iocpf->fw_mismatch_notified)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);
 
-       iocpf->retry_count++;
+       iocpf->fw_mismatch_notified = true;
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
 }
                }
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_ioc_pf_hwfailed(ioc);
+               break;
+
        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 static void
 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
 {
-       mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
-               msecs_to_jiffies(BFA_IOC_TOV));
+       iocpf->poll_time = 0;
        bfa_ioc_reset(iocpf->ioc, 0);
 }
 
 
        switch (event) {
        case IOCPF_E_FWREADY:
-               del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;
 
-       case IOCPF_E_INITFAIL:
-               del_timer(&ioc->iocpf_timer);
-               /*
-                * !!! fall through !!!
-                */
-
        case IOCPF_E_TIMEOUT:
                bfa_nw_ioc_hw_sem_release(ioc);
-               if (event == IOCPF_E_TIMEOUT)
-                       bfa_ioc_pf_failed(ioc);
+               bfa_ioc_pf_failed(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;
 {
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
+       /**
+        * Enable Interrupts before sending fw IOC ENABLE cmd.
+        */
+       iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
 }
 
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;
 
-       case IOCPF_E_FWREADY:
-               bfa_ioc_send_enable(ioc);
-               break;
-
        default:
                bfa_sm_fault(event);
        }
 }
 
-static bool
-bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
-{
-       return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
-}
-
 static void
 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
 {
 static void
 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
 {
-       struct bfa_ioc *ioc = iocpf->ioc;
-
        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;
 
-       case IOCPF_E_FWREADY:
-               bfa_ioc_pf_failed(ioc);
-               if (bfa_nw_ioc_is_operational(ioc))
-                       bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
-               else
-                       bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
-               break;
-
        default:
                bfa_sm_fault(event);
        }
 
        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
-       case IOCPF_E_FWREADY:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_ioc_pf_hwfailed(ioc);
+               break;
+
        case IOCPF_E_FAIL:
                break;
 
 
        switch (event) {
        case IOCPF_E_ENABLE:
-               iocpf->retry_count = 0;
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;
 
        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
-               bfa_ioc_sync_ack(ioc);
-               iocpf->retry_count++;
-               if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
-                       bfa_ioc_sync_leave(ioc);
-                       bfa_nw_ioc_hw_sem_release(ioc);
-                       bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-               } else {
-                       if (bfa_ioc_sync_complete(ioc))
-                               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
-                       else {
-                               bfa_nw_ioc_hw_sem_release(ioc);
-                               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-                       }
-               }
+               bfa_ioc_sync_leave(ioc);
+               writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+               bfa_nw_ioc_hw_sem_release(ioc);
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+               break;
+
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_ioc_pf_hwfailed(ioc);
                break;
 
        case IOCPF_E_DISABLE:
 static void
 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
 {
-       bfa_ioc_pf_initfailed(iocpf->ioc);
 }
 
 /**
 
        switch (event) {
        case IOCPF_E_SEMLOCKED:
-               iocpf->retry_count = 0;
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
+                       writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
                }
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_ioc_pf_hwfailed(ioc);
+               break;
+
        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 
        r32 = readl(sem_reg);
 
-       while (r32 && (cnt < BFA_SEM_SPINCNT)) {
+       while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }
 
-       if (r32 == 0)
+       if (!(r32 & 1))
                return true;
 
        BUG_ON(!(cnt < BFA_SEM_SPINCNT));
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
-       if (r32 == 0) {
+       if (r32 == ~0) {
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
+               return;
+       }
+       if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }
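
Two hardware conventions are folded into this hunk: only bit 0 of the semaphore register carries the lock state, and a read from a dead PCI mapping returns all-ones, which is why ~0 is now treated as a hardware error rather than a busy semaphore. Restated as a sketch, condensed from the code above:

	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0)		/* unmapped BAR reads as 0xffffffff	*/
		;		/* -> IOCPF_E_SEM_ERROR -> hwfail state	*/
	else if (!(r32 & 1))	/* bit 0 clear: we own the semaphore	*/
		;		/* -> IOCPF_E_SEMLOCKED			*/
	else			/* bit 0 set: held by the other side	*/
		;		/* keep retrying			*/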
        int i;
 
        drv_fwhdr = (struct bfi_ioc_image_hdr *)
-               bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+               bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
                if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
 
        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
        drv_fwhdr = (struct bfi_ioc_image_hdr *)
-               bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+               bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
        if (fwhdr.signature != drv_fwhdr->signature)
                return false;
 
-       if (swab32(fwhdr.param) != boot_env)
+       if (swab32(fwhdr.bootenv) != boot_env)
                return false;
 
        return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
 
        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
-       boot_env = BFI_BOOT_LOADER_OS;
-
        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;
 
+       boot_env = BFI_FWBOOT_ENV_OS;
+
        /**
         * check if firmware is valid
         */
                false : bfa_ioc_fwver_valid(ioc, boot_env);
 
        if (!fwvalid) {
-               bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
+               bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
+               bfa_ioc_poll_fwinit(ioc);
                return;
        }
 
         * just wait for an initialization completion interrupt.
         */
        if (ioc_fwstate == BFI_IOC_INITING) {
-               ioc->cbfn->reset_cbfn(ioc->bfa);
+               bfa_ioc_poll_fwinit(ioc);
                return;
        }
 
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
                bfa_ioc_msgflush(ioc);
-               ioc->cbfn->reset_cbfn(ioc->bfa);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }
        /**
         * Initialize the h/w for any other states.
         */
-       bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
+       bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
+       bfa_ioc_poll_fwinit(ioc);
 }
 
 void
 
        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
-       enable_req.ioc_class = ioc->ioc_mc;
+       enable_req.clscode = htons(ioc->clscode);
        do_gettimeofday(&tv);
        enable_req.tv_sec = ntohl(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
        u32 loff = 0;
        u32 chunkno = 0;
        u32 i;
+       u32 asicmode;
 
        /**
         * Initialize LMEM first before code download
         */
        bfa_ioc_lmem_init(ioc);
 
-       fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+       fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 
        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
-       for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+       for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
                if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
                        chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
-                       fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+                       fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
                }
 
                      ioc->ioc_regs.host_page_num_fn);
 
        /*
-        * Set boot type and boot param at the end.
+        * Set boot type, env and device mode at the end.
        */
+       asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
+                                       ioc->port0_mode, ioc->port1_mode);
+       writel(asicmode, ((ioc->ioc_regs.smem_page_start)
+                       + BFI_FWBOOT_DEVMODE_OFF));
        writel(boot_type, ((ioc->ioc_regs.smem_page_start)
-                       + (BFI_BOOT_TYPE_OFF)));
+                       + (BFI_FWBOOT_TYPE_OFF)));
        writel(boot_env, ((ioc->ioc_regs.smem_page_start)
-                       + (BFI_BOOT_LOADER_OFF)));
+                       + (BFI_FWBOOT_ENV_OFF)));
 }
 
 static void
        bfa_ioc_hwinit(ioc, force);
 }
 
+/**
+ * BFA ioc enable reply by firmware
+ */
+static void
+bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
+                       u8 cap_bm)
+{
+       struct bfa_iocpf *iocpf = &ioc->iocpf;
+
+       ioc->port_mode = ioc->port_mode_cfg = port_mode;
+       ioc->ad_cap_bm = cap_bm;
+       bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+}
+
 /**
  * @brief
  * Update BFA configuration from firmware configuration.
 {
        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
        struct bfa_mbox_cmd *cmd;
-       u32                     stat;
+       bfa_mbox_cmd_cbfn_t cbfn;
+       void *cbarg;
+       u32 stat;
 
        /**
         * If no command pending, do nothing
         */
        bfa_q_deq(&mod->cmd_q, &cmd);
        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+
+       /**
+        * Give a callback to the client, indicating that the command is sent
+        */
+       if (cmd->cbfn) {
+               cbfn = cmd->cbfn;
+               cbarg = cmd->cbarg;
+               cmd->cbfn = NULL;
+               cbfn(cbarg);
+       }
 }
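
With this change, a client that queues a mailbox command can attach a completion that fires exactly once, when the command has actually been copied to the hardware mailbox; clearing cmd->cbfn before the call makes it safe for the callback to queue a follow-up command immediately. A hypothetical client callback (struct my_mod, cmd_pending and the function name are illustrative, not from the driver):

	struct my_mod {
		bool cmd_pending;
	};

	/* Runs once the queued command has been handed to the h/w;
	 * it is legal to post the next command from this context. */
	static void my_cmd_posted(void *cbarg)
	{
		struct my_mod *mod = cbarg;

		mod->cmd_pending = false;
	}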
 
 /**
 }
 
 static void
-bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
+bfa_ioc_pf_failed(struct bfa_ioc *ioc)
 {
-       bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
+       bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
 }
 
 static void
-bfa_ioc_pf_failed(struct bfa_ioc *ioc)
+bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
 {
-       bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+       bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
 }
 
 static void
  * as the entry vector.
  */
 static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
+bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
+               u32 boot_env)
 {
-       void __iomem *rb;
-
        bfa_ioc_stats(ioc, ioc_boots);
 
        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
        /**
         * Initialize IOC state of all functions on a chip reset.
         */
-       rb = ioc->pcidev.pci_bar_kva;
-       if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
-               writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
-               writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
+       if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
+               writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
+               writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
        } else {
-               writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
-               writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
+               writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
+               writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
        }
 
        bfa_ioc_msgflush(ioc);
        bfa_ioc_download_fw(ioc, boot_type, boot_env);
-
-       /**
-        * Enable interrupts just before starting LPU
-        */
-       ioc->cbfn->reset_cbfn(ioc->bfa);
        bfa_ioc_lpu_start(ioc);
 }
 
        bfa_nw_auto_recover = auto_recover;
 }
 
-static void
+static bool
 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
 {
        u32     *msgp = mbmsg;
        u32     r32;
        int             i;
 
+       r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+       if ((r32 & 1) == 0)
+               return false;
+
        /**
         * read the MBOX msg
         */
         */
        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
        readl(ioc->ioc_regs.lpu_mbox_cmd);
+
+       return true;
 }
 
 static void
        case BFI_IOC_I2H_HBEAT:
                break;
 
-       case BFI_IOC_I2H_READY_EVENT:
-               bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
-               break;
-
        case BFI_IOC_I2H_ENABLE_REPLY:
-               bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+               bfa_ioc_enable_reply(ioc,
+                       (enum bfa_mode)msg->fw_event.port_mode,
+                       msg->fw_event.cap_bm);
                break;
 
        case BFI_IOC_I2H_DISABLE_REPLY:
 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
 {
        bfa_fsm_send_event(ioc, IOC_E_DETACH);
+
+       /* Done with detach, empty the notify_q. */
+       INIT_LIST_HEAD(&ioc->notify_q);
 }
 
 /**
  */
 void
 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
-                enum bfi_mclass mc)
+                enum bfi_pcifn_class clscode)
 {
-       ioc->ioc_mc     = mc;
+       ioc->clscode    = clscode;
        ioc->pcidev     = *pcidev;
-       ioc->ctdev      = bfa_asic_id_ct(ioc->pcidev.device_id);
-       ioc->cna        = ioc->ctdev && !ioc->fcmode;
+
+       /**
+        * Initialize IOC and device personality
+        */
+       ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
+       ioc->asic_mode  = BFI_ASIC_MODE_FC;
+
+       switch (pcidev->device_id) {
+       case PCI_DEVICE_ID_BROCADE_CT:
+               ioc->asic_gen = BFI_ASIC_GEN_CT;
+               ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+               ioc->asic_mode  = BFI_ASIC_MODE_ETH;
+               ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
+               ioc->ad_cap_bm = BFA_CM_CNA;
+               break;
+
+       default:
+               BUG_ON(1);
+       }
 
        bfa_nw_ioc_set_ct_hwif(ioc);
 
        struct bfi_mbmsg m;
        int                             mc;
 
-       bfa_ioc_msgget(ioc, &m);
+       if (bfa_ioc_msgget(ioc, &m)) {
+               /**
+                * Treat IOC message class as special.
+                */
+               mc = m.mh.msg_class;
+               if (mc == BFI_MC_IOC) {
+                       bfa_ioc_isr(ioc, &m);
+                       return;
+               }
 
-       /**
-        * Treat IOC message class as special.
-        */
-       mc = m.mh.msg_class;
-       if (mc == BFI_MC_IOC) {
-               bfa_ioc_isr(ioc, &m);
-               return;
+               if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+                       return;
+
+               mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
        }
 
-       if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
-               return;
+       bfa_ioc_lpu_read_stat(ioc);
 
-       mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+       /**
+        * Try to send pending mailbox commands
+        */
+       bfa_ioc_mbox_poll(ioc);
 }
 
 void
        ad_attr->asic_rev = ioc_attr->asic_rev;
 
        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
-
-       ad_attr->cna_capable = ioc->cna;
-       ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
 }
 
 static enum bfa_ioc_type
 bfa_ioc_get_type(struct bfa_ioc *ioc)
 {
-       if (!ioc->ctdev || ioc->fcmode)
-               return BFA_IOC_TYPE_FC;
-       else if (ioc->ioc_mc == BFI_MC_IOCFC)
-               return BFA_IOC_TYPE_FCoE;
-       else if (ioc->ioc_mc == BFI_MC_LL)
-               return BFA_IOC_TYPE_LL;
-       else {
-               BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
+       if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
                return BFA_IOC_TYPE_LL;
-       }
+
+       BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
+
+       return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
+               ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
 }
 
 static void
 
        ioc_attr->state = bfa_ioc_get_state(ioc);
        ioc_attr->port_id = ioc->port_id;
+       ioc_attr->port_mode = ioc->port_mode;
+
+       ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
+       ioc_attr->cap_bm = ioc->ad_cap_bm;
 
        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
 
 bfa_nw_iocpf_timeout(void *ioc_arg)
 {
        struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
+       enum bfa_iocpf_state iocpf_st;
+
+       iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
 
-       bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
+       if (iocpf_st == BFA_IOCPF_HWINIT)
+               bfa_ioc_poll_fwinit(ioc);
+       else
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
 }
 
 void
 
        bfa_ioc_hw_sem_get(ioc);
 }
+
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
+{
+       u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+       if (fwstate == BFI_IOC_DISABLED) {
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+               return;
+       }
+
+       if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
+               bfa_nw_iocpf_timeout(ioc);
+       } else {
+               ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+               mod_timer(&ioc->iocpf_timer, jiffies +
+                       msecs_to_jiffies(BFA_IOC_POLL_TOV));
+       }
+}
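
Worked timing, assuming BFA_IOC_TOV keeps its existing 3000 ms value from bfa_ioc.h and BFA_IOC_POLL_TOV is the 200 ms constant added below: poll_time grows by 200 on each pass, so firmware initialization is polled at most 3000 / 200 = 15 times before the IOCPF_E_TIMEOUT path is taken, replacing the old one-shot IOCPF timer for this phase.

	/* Sketch of the budget, under the assumed constants: */
	enum { FWINIT_MAX_POLLS = 3000 / 200 };	/* == 15 timer re-arms */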
 
 #define BFA_IOC_HWSEM_TOV      500     /* msecs */
 #define BFA_IOC_HB_TOV         500     /* msecs */
 #define BFA_IOC_HWINIT_MAX     5
+#define BFA_IOC_POLL_TOV       200     /* msecs */
 
 /**
  * PCI device information required by IOC
 struct bfa_iocpf {
        bfa_fsm_t               fsm;
        struct bfa_ioc          *ioc;
-       u32                     retry_count;
+       bool                    fw_mismatch_notified;
        bool                    auto_recover;
+       u32                     poll_time;
 };
 
 struct bfa_ioc {
        void                    *dbg_fwsave;
        int                     dbg_fwsave_len;
        bool                    dbg_fwsave_once;
-       enum bfi_mclass         ioc_mc;
+       enum bfi_pcifn_class    clscode;
        struct bfa_ioc_regs     ioc_regs;
        struct bfa_ioc_drv_stats stats;
        bool                    fcmode;
-       bool                    ctdev;
-       bool                    cna;
        bool                    pllinit;
        bool                    stats_busy;     /*!< outstanding stats */
        u8                      port_id;
        struct bfa_ioc_mbox_mod mbox_mod;
        struct bfa_ioc_hwif     *ioc_hwif;
        struct bfa_iocpf        iocpf;
+       enum bfi_asic_gen       asic_gen;
+       enum bfi_asic_mode      asic_mode;
+       enum bfi_port_mode      port0_mode;
+       enum bfi_port_mode      port1_mode;
+       enum bfa_mode           port_mode;
+       u8                      ad_cap_bm;      /*!< adapter cap bit mask */
+       u8                      port_mode_cfg;  /*!< config port mode */
 };
 
 struct bfa_ioc_hwif {
-       enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
+       enum bfa_status (*ioc_pll_init) (void __iomem *rb,
+                                               enum bfi_asic_mode m);
        bool            (*ioc_firmware_lock)    (struct bfa_ioc *ioc);
        void            (*ioc_firmware_unlock)  (struct bfa_ioc *ioc);
        void            (*ioc_reg_init) (struct bfa_ioc *ioc);
        void            (*ioc_sync_leave)       (struct bfa_ioc *ioc);
        void            (*ioc_sync_ack)         (struct bfa_ioc *ioc);
        bool            (*ioc_sync_complete)    (struct bfa_ioc *ioc);
+       bool            (*ioc_lpu_read_stat)    (struct bfa_ioc *ioc);
 };
 
 #define bfa_ioc_pcifn(__ioc)           ((__ioc)->pcidev.pci_func)
 #define bfa_ioc_devid(__ioc)           ((__ioc)->pcidev.device_id)
 #define bfa_ioc_bar0(__ioc)            ((__ioc)->pcidev.pci_bar_kva)
 #define bfa_ioc_portid(__ioc)          ((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc)                ((__ioc)->asic_gen)
 #define bfa_ioc_fetch_stats(__ioc, __stats) \
                (((__stats)->drv_stats) = (__ioc)->stats)
 #define bfa_ioc_clr_stats(__ioc)       \
         (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) :     \
         BFI_IMAGE_CB_FC)
 #define BFA_IOC_FW_SMEM_SIZE(__ioc)                                    \
-       (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+       ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB)                   \
+       ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
 #define BFA_IOC_FLASH_CHUNK_NO(off)            (off / BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)     (off % BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
 
 #define bfa_ioc_pll_init_asic(__ioc) \
        ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
-                          (__ioc)->fcmode))
+                          (__ioc)->asic_mode))
 
 #define        bfa_ioc_isr_mode_set(__ioc, __msix)                     \
                        ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
 #define        bfa_ioc_ownership_reset(__ioc)                          \
                        ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
 
+#define bfa_ioc_lpu_read_stat(__ioc) do {                              \
+               if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)               \
+                       ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc));  \
+} while (0)
+
 void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
 
 void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
 void bfa_nw_ioc_auto_recover(bool auto_recover);
 void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
 void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
-               enum bfi_mclass mc);
+               enum bfi_pcifn_class clscode);
 u32 bfa_nw_ioc_meminfo(void);
 void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa);
 void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
 /*
  * F/W Image Size & Chunk
  */
-u32 *bfa_cb_image_get_chunk(int type, u32 off);
-u32 bfa_cb_image_get_size(int type);
+u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
+u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
 
 #endif /* __BFA_IOC_H__ */
 
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
-static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
+                               enum bfi_asic_mode asic_mode);
 
 static struct bfa_ioc_hwif nw_hwif_ct;
 
        /**
         * If bios boot (flash based) -- do not increment usage count
         */
-       if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+       if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return true;
 
        /**
         * If bios boot (flash based) -- do not decrement usage count
         */
-       if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+       if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;
 
 static void
 bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
 {
-       if (ioc->cna) {
-               writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
-               writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
-               /* Wait for halt to take effect */
-               readl(ioc->ioc_regs.ll_halt);
-               readl(ioc->ioc_regs.alt_ll_halt);
-       } else {
-               writel(~0U, ioc->ioc_regs.err_set);
-               readl(ioc->ioc_regs.err_set);
-       }
+       writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+       writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
+       /* Wait for halt to take effect */
+       readl(ioc->ioc_regs.ll_halt);
+       readl(ioc->ioc_regs.alt_ll_halt);
 }
 
 /**
  * Host to LPU mailbox message addresses
  */
-static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
 
        rb = bfa_ioc_bar0(ioc);
 
-       ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
-       ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
-       ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+       ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+       ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+       ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
 
        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 static void
 bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
 {
-       if (ioc->cna) {
-               bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-               writel(0, ioc->ioc_regs.ioc_usage_reg);
-               bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
-       }
+       bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+       writel(0, ioc->ioc_regs.ioc_usage_reg);
+       bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 
        /*
         * Read the hw sem reg to make sure that it is locked
 }
 
 static enum bfa_status
-bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
 {
        u32     pll_sclk, pll_fclk, r32;
+       bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);
 
        pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
                __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
 
        u8              msg_id;         /*!< msg opcode with in the class   */
        union {
                struct {
-                       u8      rsvd;
-                       u8      lpu_id; /*!< msg destination                */
+                       u8      qid;
+                       u8      fn_lpu; /*!< msg destination                */
                } h2i;
                u16     i2htok; /*!< token in msgs to host          */
        } mtag;
 };
 
-#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do {                \
+#define bfi_fn_lpu(__fn, __lpu)        ((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh)     ((_mh)->mtag.h2i.fn_lpu >> 1)
+#define bfi_mhdr_2_qid(_mh)    ((_mh)->mtag.h2i.qid)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do {               \
        (_mh).msg_class                 = (_mc);                \
        (_mh).msg_id                    = (_op);                \
-       (_mh).mtag.h2i.lpu_id   = (_lpuid);                     \
+       (_mh).mtag.h2i.fn_lpu   = (_fn_lpu);                    \
 } while (0)
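
The new mtag packs the destination PCI function into bits 7..1 and the LPU id into bit 0 of a single byte. A worked example of the round trip, using the macros above:

	/* Function 2, LPU 1:
	 *   bfi_fn_lpu(2, 1)        == (2 << 1) | 1 == 0x05
	 *   bfi_mhdr_2_fn(&mh)      == 0x05 >> 1    == 2   (PCI function)
	 *   mh.mtag.h2i.fn_lpu & 1  ==                 1   (LPU id)
	 */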
 
 #define bfi_i2h_set(_mh, _mc, _op, _i2htok) do {               \
        u32             pl[BFI_MBMSG_SZ];
 };
 
+/**
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+       BFI_PCIFN_CLASS_FC      = 0x0c04,
+       BFI_PCIFN_CLASS_ETH     = 0x0200,
+};
+
 /**
  * Message Classes
  */
  *----------------------------------------------------------------------
  */
 
+/**
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+       BFI_ASIC_GEN_CB         = 1,
+       BFI_ASIC_GEN_CT         = 2,
+};
+
+enum bfi_asic_mode {
+       BFI_ASIC_MODE_FC        = 1,    /* FC up to 8G speed            */
+       BFI_ASIC_MODE_FC16      = 2,    /* FC up to 16G speed           */
+       BFI_ASIC_MODE_ETH       = 3,    /* Ethernet ports               */
+       BFI_ASIC_MODE_COMBO     = 4,    /* FC 16G and Ethernet 10G port */
+};
+
 enum bfi_ioc_h2i_msgs {
        BFI_IOC_H2I_ENABLE_REQ          = 1,
        BFI_IOC_H2I_DISABLE_REQ         = 2,
        BFI_IOC_I2H_ENABLE_REPLY        = BFA_I2HM(1),
        BFI_IOC_I2H_DISABLE_REPLY       = BFA_I2HM(2),
        BFI_IOC_I2H_GETATTR_REPLY       = BFA_I2HM(3),
-       BFI_IOC_I2H_READY_EVENT         = BFA_I2HM(4),
-       BFI_IOC_I2H_HBEAT               = BFA_I2HM(5),
+       BFI_IOC_I2H_HBEAT               = BFA_I2HM(4),
 };
 
 /**
        u64             mfg_pwwn;       /*!< Mfg port wwn          */
        u64             mfg_nwwn;       /*!< Mfg node wwn          */
        mac_t           mfg_mac;        /*!< Mfg mac               */
-       u16     rsvd_a;
+       u8              port_mode;      /* enum bfi_port_mode      */
+       u8              rsvd_a;
        u64             pwwn;
        u64             nwwn;
        mac_t           mac;            /*!< PBC or Mfg mac        */
 #define BFI_IOC_MD5SUM_SZ      4
 struct bfi_ioc_image_hdr {
        u32     signature;      /*!< constant signature */
-       u32     rsvd_a;
+       u8      asic_gen;       /*!< asic generation */
+       u8      asic_mode;
+       u8      port0_mode;     /*!< device mode for port 0 */
+       u8      port1_mode;     /*!< device mode for port 1 */
        u32     exec;           /*!< exec vector        */
-       u32     param;          /*!< parameters         */
+       u32     bootenv;        /*!< firmware boot env */
        u32     rsvd_b[4];
        u32     md5sum[BFI_IOC_MD5SUM_SZ];
 };
 
+#define BFI_FWBOOT_DEVMODE_OFF         4
+#define BFI_FWBOOT_TYPE_OFF            8
+#define BFI_FWBOOT_ENV_OFF             12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+       (((u32)(__asic_gen)) << 24 |    \
+        ((u32)(__asic_mode)) << 16 |   \
+        ((u32)(__p0_mode)) << 8 |      \
+        ((u32)(__p1_mode)))
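
Using enum values from elsewhere in this patch (BFI_ASIC_GEN_CT = 2, BFI_ASIC_MODE_ETH = 3, BFI_PORT_MODE_ETH = 2), the device-mode word that bfa_ioc_download_fw() writes at BFI_FWBOOT_DEVMODE_OFF for a CT function with both ports in Ethernet mode works out to:

	/* BFI_FWBOOT_DEVMODE(BFI_ASIC_GEN_CT, BFI_ASIC_MODE_ETH,
	 *                    BFI_PORT_MODE_ETH, BFI_PORT_MODE_ETH)
	 *   = (2 << 24) | (3 << 16) | (2 << 8) | 2
	 *   = 0x02030202
	 */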
+
 enum bfi_fwboot_type {
        BFI_FWBOOT_TYPE_NORMAL  = 0,
        BFI_FWBOOT_TYPE_FLASH   = 1,
        BFI_FWBOOT_TYPE_MEMTEST = 2,
 };
 
+enum bfi_port_mode {
+       BFI_PORT_MODE_FC        = 1,
+       BFI_PORT_MODE_ETH       = 2,
+};
+
 /**
  *  BFI_IOC_I2H_READY_EVENT message
  */
  */
 struct bfi_ioc_ctrl_req {
        struct bfi_mhdr mh;
-       u8                      ioc_class;
-       u8                      rsvd[3];
+       u16                     clscode;
+       u16                     rsvd;
        u32             tv_sec;
 };
 
  * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
  */
 struct bfi_ioc_ctrl_reply {
-       struct bfi_mhdr mh;             /*!< Common msg header     */
+       struct bfi_mhdr mh;                     /*!< Common msg header     */
        u8                      status;         /*!< enable/disable status */
-       u8                      rsvd[3];
+       u8                      port_mode;      /*!< enum bfa_mode */
+       u8                      cap_bm;         /*!< capability bit mask */
+       u8                      rsvd;
 };
 
 #define BFI_IOC_MSGSZ   8
  */
 union bfi_ioc_i2h_msg_u {
        struct bfi_mhdr mh;
-       struct bfi_ioc_rdy_event rdy_event;
+       struct bfi_ioc_ctrl_reply fw_event;
        u32                     mboxmsg[BFI_IOC_MSGSZ];
 };
 
 
        (_qe)->cbarg = (_cbarg);                                        \
 } while (0)
 
-#define bna_is_small_rxq(rcb) ((rcb)->id == 1)
+#define bna_is_small_rxq(_id) ((_id) & 0x1)
 
 #define BNA_MAC_IS_EQUAL(_mac1, _mac2)                                 \
        (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
        }                                                               \
 } while (0)
 
-#define        call_rxf_stop_cbfn(rxf, status)                                 \
+#define        call_rxf_stop_cbfn(rxf)                                         \
+do {                                                                   \
        if ((rxf)->stop_cbfn) {                                         \
-               (*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status));       \
+               void (*cbfn)(struct bna_rx *);                          \
+               struct bna_rx *cbarg;                                   \
+               cbfn = (rxf)->stop_cbfn;                                \
+               cbarg = (rxf)->stop_cbarg;                              \
                (rxf)->stop_cbfn = NULL;                                \
                (rxf)->stop_cbarg = NULL;                               \
-       }
+               cbfn(cbarg);                                            \
+       }                                                               \
+} while (0)
 
-#define        call_rxf_start_cbfn(rxf, status)                                \
+#define        call_rxf_start_cbfn(rxf)                                        \
+do {                                                                   \
        if ((rxf)->start_cbfn) {                                        \
-               (*(rxf)->start_cbfn)((rxf)->start_cbarg, (status));     \
+               void (*cbfn)(struct bna_rx *);                          \
+               struct bna_rx *cbarg;                                   \
+               cbfn = (rxf)->start_cbfn;                               \
+               cbarg = (rxf)->start_cbarg;                             \
                (rxf)->start_cbfn = NULL;                               \
                (rxf)->start_cbarg = NULL;                              \
-       }
+               cbfn(cbarg);                                            \
+       }                                                               \
+} while (0)
 
-#define        call_rxf_cam_fltr_cbfn(rxf, status)                             \
+#define        call_rxf_cam_fltr_cbfn(rxf)                                     \
+do {                                                                   \
        if ((rxf)->cam_fltr_cbfn) {                                     \
-               (*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx, \
-                                       (status));                      \
+               void (*cbfn)(struct bnad *, struct bna_rx *);           \
+               struct bnad *cbarg;                                     \
+               cbfn = (rxf)->cam_fltr_cbfn;                            \
+               cbarg = (rxf)->cam_fltr_cbarg;                          \
                (rxf)->cam_fltr_cbfn = NULL;                            \
                (rxf)->cam_fltr_cbarg = NULL;                           \
-       }
+               cbfn(cbarg, rxf->rx);                                   \
+       }                                                               \
+} while (0)
 
-#define        call_rxf_pause_cbfn(rxf, status)                                \
+#define        call_rxf_pause_cbfn(rxf)                                        \
+do {                                                                   \
        if ((rxf)->oper_state_cbfn) {                                   \
-               (*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
-                                       (status));                      \
-               (rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED;      \
+               void (*cbfn)(struct bnad *, struct bna_rx *);           \
+               struct bnad *cbarg;                                     \
+               cbfn = (rxf)->oper_state_cbfn;                          \
+               cbarg = (rxf)->oper_state_cbarg;                        \
                (rxf)->oper_state_cbfn = NULL;                          \
                (rxf)->oper_state_cbarg = NULL;                         \
-       }
+               cbfn(cbarg, rxf->rx);                                   \
+       }                                                               \
+} while (0)
 
-#define        call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
+#define        call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
 
 #define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
 
        }                                                               \
 } while (0)
 
+#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)
+
+#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
+
+#define bna_tx_from_rid(_bna, _rid, _tx)                               \
+do {                                                                   \
+       struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod;                  \
+       struct bna_tx *__tx;                                            \
+       struct list_head *qe;                                           \
+       _tx = NULL;                                                     \
+       list_for_each(qe, &__tx_mod->tx_active_q) {                     \
+               __tx = (struct bna_tx *)qe;                             \
+               if (__tx->rid == (_rid)) {                              \
+                       (_tx) = __tx;                                   \
+                       break;                                          \
+               }                                                       \
+       }                                                               \
+} while (0)
+
+#define bna_rx_from_rid(_bna, _rid, _rx)                               \
+do {                                                                   \
+       struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod;                  \
+       struct bna_rx *__rx;                                            \
+       struct list_head *qe;                                           \
+       _rx = NULL;                                                     \
+       list_for_each(qe, &__rx_mod->rx_active_q) {                     \
+               __rx = (struct bna_rx *)qe;                             \
+               if (__rx->rid == (_rid)) {                              \
+                       (_rx) = __rx;                                   \
+                       break;                                          \
+               }                                                       \
+       }                                                               \
+} while (0)
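
Both lookups are statement macros rather than expressions, so the result is returned through the last argument; a minimal usage sketch (bna, rid and coalescing_timeo are assumed to be in scope):

	struct bna_rx *rx;

	bna_rx_from_rid(bna, rid, rx);	/* rx stays NULL if rid not active */
	if (rx)
		bna_rx_coalescing_timeo_set(rx, coalescing_timeo);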
+
+/**
+ *
+ *  Inline functions
+ *
+ */
+
+static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
+{
+       struct bna_mac *mac = NULL;
+       struct list_head *qe;
+       list_for_each(qe, q) {
+               if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
+                       mac = (struct bna_mac *)qe;
+                       break;
+               }
+       }
+       return mac;
+}
+
+#define bna_attr(_bna) (&(_bna)->ioceth.attr)
+
 /**
  *
  * Function prototypes
  * BNA
  */
 
+/* FW response handlers */
+void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
+
 /* APIs for BNAD */
 void bna_res_req(struct bna_res_info *res_info);
+void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
 void bna_init(struct bna *bna, struct bnad *bnad,
                        struct bfa_pcidev *pcidev,
                        struct bna_res_info *res_info);
+void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
 void bna_uninit(struct bna *bna);
+int bna_num_txq_set(struct bna *bna, int num_txq);
+int bna_num_rxp_set(struct bna *bna, int num_rxp);
 void bna_stats_get(struct bna *bna);
 void bna_get_perm_mac(struct bna *bna, u8 *mac);
+void bna_hw_stats_get(struct bna *bna);
 
 /* APIs for Rx */
 int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
 struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
 void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
                          struct bna_mac *mac);
+struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
+void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
+                         struct bna_mcam_handle *handle);
 struct bna_rit_segment *
 bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
 void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
 void bna_port_cb_rx_stopped(struct bna_port *port,
                            enum bna_cb_status status);
 
+/**
+ * ETHPORT
+ */
+
+/* Callbacks for RX */
+void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
+void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
+
 /**
  * IB
  */
 /**
  * TX MODULE AND TX
  */
+/* FW response handlers */
+void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
+                              struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
+                             struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);
 
 /* APIs for BNA */
 void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
 void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
 int bna_tx_state_get(struct bna_tx *tx);
 
-/* APIs for PORT */
+/* APIs for ENET */
 void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
 void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
 void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
 void bna_tx_destroy(struct bna_tx *tx);
 void bna_tx_enable(struct bna_tx *tx);
 void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
-                   void (*cbfn)(void *, struct bna_tx *,
-                                enum bna_cb_status));
+                   void (*cbfn)(void *, struct bna_tx *));
+void bna_tx_cleanup_complete(struct bna_tx *tx);
 void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
 
 /**
 void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
 void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
 
+/* FW response handlers */
+void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
+                              struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
+                             struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
+                              struct bfi_msgq_mhdr *msghdr);
+
 /* APIs for BNA */
 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
                     struct bna_res_info *res_info);
 int bna_rx_state_get(struct bna_rx *rx);
 int bna_rxf_state_get(struct bna_rxf *rxf);
 
-/* APIs for PORT */
+/* APIs for ENET */
 void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
 void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
 void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
 void bna_rx_destroy(struct bna_rx *rx);
 void bna_rx_enable(struct bna_rx *rx);
 void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
-                   void (*cbfn)(void *, struct bna_rx *,
-                                enum bna_cb_status));
+                   void (*cbfn)(void *, struct bna_rx *));
+void bna_rx_cleanup_complete(struct bna_rx *rx);
 void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
 void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
 void bna_rx_dim_update(struct bna_ccb *ccb);
 enum bna_cb_status
 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
-                void (*cbfn)(struct bnad *, struct bna_rx *,
-                             enum bna_cb_status));
+                void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
+                void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
+                void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
-                void (*cbfn)(struct bnad *, struct bna_rx *,
-                             enum bna_cb_status));
+                void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
-                    void (*cbfn)(struct bnad *, struct bna_rx *,
-                                 enum bna_cb_status));
+                    void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
                enum bna_rxmode bitmask,
-               void (*cbfn)(struct bnad *, struct bna_rx *,
-                            enum bna_cb_status));
+               void (*cbfn)(struct bnad *, struct bna_rx *));
 void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
-                      void (*cbfn)(struct bnad *, struct bna_rx *,
-                                   enum bna_cb_status));
+void bna_rx_hds_enable(struct bna_rx *rx, struct bna_hds_config *hds_config,
+                      void (*cbfn)(struct bnad *, struct bna_rx *));
 void bna_rx_hds_disable(struct bna_rx *rx,
-                       void (*cbfn)(struct bnad *, struct bna_rx *,
-                                    enum bna_cb_status));
+                       void (*cbfn)(struct bnad *, struct bna_rx *));
+
+/**
+ * ENET
+ */
+
+/* API for RX */
+int bna_enet_mtu_get(struct bna_enet *enet);
+
+/* Callbacks for TX, RX */
+void bna_enet_cb_tx_stopped(struct bna_enet *enet);
+void bna_enet_cb_rx_stopped(struct bna_enet *enet);
+
+/* API for BNAD */
+void bna_enet_enable(struct bna_enet *enet);
+void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
+                     void (*cbfn)(void *));
+void bna_enet_pause_config(struct bna_enet *enet,
+                          struct bna_pause_config *pause_config,
+                          void (*cbfn)(struct bnad *));
+void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
+                     void (*cbfn)(struct bnad *));
+void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
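All enet operations are asynchronous and complete through the supplied callback. A short usage sketch under that assumption, reusing bnad_cb_enet_disabled() defined later in this patch:

	/* Bring the enet up; Tx/Rx/ethport starts are driven by its FSM. */
	bna_enet_enable(&bnad->bna.enet);

	/* Tear it down; bnad_cb_enet_disabled() completes enet_comp once
	 * every child object has stopped. */
	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
			 bnad_cb_enet_disabled);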
+
+/**
+ * IOCETH
+ */
+
+/* APIs for BNAD */
+void bna_ioceth_enable(struct bna_ioceth *ioceth);
+void bna_ioceth_disable(struct bna_ioceth *ioceth,
+                       enum bna_cleanup_type type);
 
 /**
  * BNAD
  */
 
+/* Callbacks for ENET */
+void bnad_cb_ethport_link_status(struct bnad *bnad,
+                             enum bna_link_status status);
+
+/* Callbacks for IOCETH */
+void bnad_cb_ioceth_ready(struct bnad *bnad);
+void bnad_cb_ioceth_failed(struct bnad *bnad);
+void bnad_cb_ioceth_disabled(struct bnad *bnad);
+void bnad_cb_mbox_intr_enable(struct bnad *bnad);
+void bnad_cb_mbox_intr_disable(struct bnad *bnad);
+
 /* Callbacks for BNA */
 void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
                       struct bna_stats *stats);
 
 #define __BNA_TYPES_H__
 
 #include "cna.h"
-#include "bna_hw.h"
+#include "bna_hw_defs.h"
 #include "bfa_cee.h"
+#include "bfi_enet.h"
+#include "bfa_msgq.h"
 
 /**
  *
  *
  */
 
+struct bna_mcam_handle;
 struct bna_txq;
 struct bna_tx;
 struct bna_rxq;
 struct bna_rx;
 struct bna_rxf;
 struct bna_port;
+struct bna_enet;
 struct bna;
 struct bnad;
 
        BNA_RES_T_MAX
 };
 
+enum bna_mod_res_req_type {
+       BNA_MOD_RES_MEM_T_TX_ARRAY      = 0,
+       BNA_MOD_RES_MEM_T_TXQ_ARRAY     = 1,
+       BNA_MOD_RES_MEM_T_RX_ARRAY      = 2,
+       BNA_MOD_RES_MEM_T_RXP_ARRAY     = 3,
+       BNA_MOD_RES_MEM_T_RXQ_ARRAY     = 4,
+       BNA_MOD_RES_MEM_T_UCMAC_ARRAY   = 5,
+       BNA_MOD_RES_MEM_T_MCMAC_ARRAY   = 6,
+       BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7,
+       BNA_MOD_RES_T_MAX
+};
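Each enumerator names one memory chunk the driver attaches before module init, which the module then carves into its object arrays. A sketch of the consuming side; the res_u.mem_info.mdl[0].kva accessors are assumed to match the existing bna_res_info layout:

	/* Sketch: Rx module pulling its arrays out of the attached memory. */
	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;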
+
 enum bna_tx_res_req_type {
        BNA_TX_RES_MEM_T_TCB    = 0,
        BNA_TX_RES_MEM_T_UNMAPQ = 1,
        BNA_TX_RES_MEM_T_QPT    = 2,
        BNA_TX_RES_MEM_T_SWQPT  = 3,
        BNA_TX_RES_MEM_T_PAGE   = 4,
-       BNA_TX_RES_INTR_T_TXCMPL = 5,
+       BNA_TX_RES_MEM_T_IBIDX  = 5,
+       BNA_TX_RES_INTR_T_TXCMPL = 6,
        BNA_TX_RES_T_MAX,
 };
 
        BNA_RX_RES_MEM_T_DSWQPT         = 9,    /* RX s/w QPT */
        BNA_RX_RES_MEM_T_DPAGE          = 10,   /* RX s/w QPT */
        BNA_RX_RES_MEM_T_HPAGE          = 11,   /* RX s/w QPT */
-       BNA_RX_RES_T_INTR               = 12,   /* Rx interrupts */
-       BNA_RX_RES_T_MAX                = 13
+       BNA_RX_RES_MEM_T_IBIDX          = 12,
+       BNA_RX_RES_MEM_T_RIT            = 13,
+       BNA_RX_RES_T_INTR               = 14,   /* Rx interrupts */
+       BNA_RX_RES_T_MAX                = 15
 };
 
 enum bna_mbox_state {
 };
 
 enum bna_tx_flags {
-       BNA_TX_F_PORT_STARTED   = 1,
+       BNA_TX_F_ENET_STARTED   = 1,
        BNA_TX_F_ENABLED        = 2,
-       BNA_TX_F_PRIO_LOCK      = 4,
+       BNA_TX_F_PRIO_CHANGED   = 4,
+       BNA_TX_F_BW_UPDATED     = 8,
 };
 
 enum bna_tx_mod_flags {
-       BNA_TX_MOD_F_PORT_STARTED       = 1,
-       BNA_TX_MOD_F_PORT_LOOPBACK      = 2,
+       BNA_TX_MOD_F_ENET_STARTED       = 1,
+       BNA_TX_MOD_F_ENET_LOOPBACK      = 2,
 };
 
 enum bna_rx_type {
 
 enum bna_rxmode {
        BNA_RXMODE_PROMISC      = 1,
-       BNA_RXMODE_ALLMULTI     = 2
+       BNA_RXMODE_DEFAULT      = 2,
+       BNA_RXMODE_ALLMULTI     = 4
 };
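The rx modes are now distinct bits, which is what makes the (rxmode, bitmask) pair taken by bna_rx_mode_set() workable: the mask selects which bits the call may change. A hedged example of the assumed semantics (a NULL callback is taken to mean "no completion notification"):

	enum bna_cb_status err;

	/* Enable ALLMULTI without touching PROMISC or DEFAULT. */
	err = bna_rx_mode_set(rx, BNA_RXMODE_ALLMULTI,
			      BNA_RXMODE_ALLMULTI, NULL);

	/* Clear PROMISC: named in the mask, absent from the request. */
	err = bna_rx_mode_set(rx, 0, BNA_RXMODE_PROMISC, NULL);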
 
 enum bna_rx_event {
        RX_E_START                      = 1,
        RX_E_STOP                       = 2,
        RX_E_FAIL                       = 3,
-       RX_E_RXF_STARTED                = 4,
-       RX_E_RXF_STOPPED                = 5,
-       RX_E_RXQ_STOPPED                = 6,
+       RX_E_STARTED                    = 4,
+       RX_E_STOPPED                    = 5,
+       RX_E_RXF_STARTED                = 6,
+       RX_E_RXF_STOPPED                = 7,
+       RX_E_CLEANUP_DONE               = 8,
 };
 
 enum bna_rx_state {
 };
 
 enum bna_rx_flags {
-       BNA_RX_F_ENABLE         = 0x01,         /* bnad enabled rxf */
-       BNA_RX_F_PORT_ENABLED   = 0x02,         /* Port object is enabled */
-       BNA_RX_F_PORT_FAILED    = 0x04,         /* Port in failed state */
+       BNA_RX_F_ENET_STARTED   = 1,
+       BNA_RX_F_ENABLED        = 2,
 };
 
 enum bna_rx_mod_flags {
-       BNA_RX_MOD_F_PORT_STARTED       = 1,
-       BNA_RX_MOD_F_PORT_LOOPBACK      = 2,
+       BNA_RX_MOD_F_ENET_STARTED       = 1,
+       BNA_RX_MOD_F_ENET_LOOPBACK      = 2,
 };
 
 enum bna_rxf_oper_state {
 };
 
 enum bna_rxf_flags {
-       BNA_RXF_FL_STOP_PENDING         = 0x01,
-       BNA_RXF_FL_FAILED               = 0x02,
-       BNA_RXF_FL_RSS_CONFIG_PENDING   = 0x04,
-       BNA_RXF_FL_OPERSTATE_CHANGED    = 0x08,
-       BNA_RXF_FL_RXF_ENABLED          = 0x10,
-       BNA_RXF_FL_VLAN_CONFIG_PENDING  = 0x20,
+       BNA_RXF_F_PAUSED                = 1,
 };
 
 enum bna_rxf_event {
        RXF_E_START                     = 1,
        RXF_E_STOP                      = 2,
        RXF_E_FAIL                      = 3,
-       RXF_E_CAM_FLTR_MOD              = 4,
-       RXF_E_STARTED                   = 5,
-       RXF_E_STOPPED                   = 6,
-       RXF_E_CAM_FLTR_RESP             = 7,
-       RXF_E_PAUSE                     = 8,
-       RXF_E_RESUME                    = 9,
-       RXF_E_STAT_CLEARED              = 10,
+       RXF_E_CONFIG                    = 4,
+       RXF_E_PAUSE                     = 5,
+       RXF_E_RESUME                    = 6,
+       RXF_E_FW_RESP                   = 7,
 };
 
 enum bna_rxf_state {
        BNA_PORT_T_LOOPBACK_EXTERNAL    = 2,
 };
 
+enum bna_enet_type {
+       BNA_ENET_T_REGULAR              = 0,
+       BNA_ENET_T_LOOPBACK_INTERNAL    = 1,
+       BNA_ENET_T_LOOPBACK_EXTERNAL    = 2,
+};
+
 enum bna_link_status {
        BNA_LINK_DOWN           = 0,
        BNA_LINK_UP             = 1,
        BNA_LLPORT_F_RX_STARTED         = 4
 };
 
+enum bna_ethport_flags {
+       BNA_ETHPORT_F_ADMIN_UP          = 1,
+       BNA_ETHPORT_F_PORT_ENABLED      = 2,
+       BNA_ETHPORT_F_RX_STARTED        = 4,
+};
+
 enum bna_port_flags {
        BNA_PORT_F_DEVICE_READY = 1,
        BNA_PORT_F_ENABLED      = 2,
        BNA_PORT_F_MTU_CHANGED  = 8
 };
 
+enum bna_enet_flags {
+       BNA_ENET_F_IOCETH_READY         = 1,
+       BNA_ENET_F_ENABLED              = 2,
+       BNA_ENET_F_PAUSE_CHANGED        = 4,
+       BNA_ENET_F_MTU_CHANGED          = 8
+};
+
+enum bna_rss_flags {
+       BNA_RSS_F_RIT_PENDING           = 1,
+       BNA_RSS_F_CFG_PENDING           = 2,
+       BNA_RSS_F_STATUS_PENDING        = 4,
+};
+
+enum bna_mod_flags {
+       BNA_MOD_F_INIT_DONE             = 1,
+};
+
 enum bna_pkt_rates {
        BNA_PKT_RATE_10K                = 10000,
        BNA_PKT_RATE_20K                = 20000,
        BNA_BIAS_T_MAX                  = 2
 };
 
+#define BNA_MAX_NAME_SIZE      64
+struct bna_ident {
+       int                     id;
+       char                    name[BNA_MAX_NAME_SIZE];
+};
+
 struct bna_mac {
        /* This should be the first one */
        struct list_head                        qe;
        u8                      addr[ETH_ALEN];
+       struct bna_mcam_handle *handle;
 };
 
 struct bna_mem_descr {
        u32             page_size;
 };
 
+struct bna_attr {
+       int                     num_txq;
+       int                     num_rxp;
+       int                     num_ucmac;
+       int                     num_mcmac;
+       int                     max_rit_size;
+};
+
 /**
  *
- * Device
+ * IOCEth
  *
  */
 
-struct bna_device {
+struct bna_ioceth {
        bfa_fsm_t               fsm;
        struct bfa_ioc ioc;
 
-       enum bna_intr_type intr_type;
-       int                     vector;
+       struct bna_attr attr;
+       struct bfa_msgq_cmd_entry msgq_cmd;
+       struct bfi_enet_attr_req attr_req;
 
-       void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
-       struct bnad *ready_cbarg;
-
-       void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
+       void (*stop_cbfn)(struct bnad *bnad);
        struct bnad *stop_cbarg;
 
        struct bna *bna;
        struct bna *bna;
 };
 
+/**
+ *
+ * Enet
+ *
+ */
+
+struct bna_enet {
+       bfa_fsm_t               fsm;
+       enum bna_enet_flags flags;
+
+       enum bna_enet_type type;
+
+       struct bna_pause_config pause_config;
+       int                     mtu;
+
+       /* Callback for bna_enet_disable(), enet_stop() */
+       void (*stop_cbfn)(void *);
+       void                    *stop_cbarg;
+
+       /* Callback for bna_enet_pause_config() */
+       void (*pause_cbfn)(struct bnad *);
+
+       /* Callback for bna_enet_mtu_set() */
+       void (*mtu_cbfn)(struct bnad *);
+
+       struct bfa_wc           chld_stop_wc;
+
+       struct bfa_msgq_cmd_entry msgq_cmd;
+       struct bfi_enet_set_pause_req pause_req;
+
+       struct bna *bna;
+};
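chld_stop_wc gates enet stop on its children: the Tx and Rx modules each hold a reference that their stop callbacks release. A sketch of the bfa_wc pattern as used elsewhere in this driver; bna_enet_chld_stopped is an assumed name for the resume handler:

	bfa_wc_init(&enet->chld_stop_wc, bna_enet_chld_stopped, enet);
	bfa_wc_up(&enet->chld_stop_wc);		/* reference for tx_mod */
	bfa_wc_up(&enet->chld_stop_wc);		/* reference for rx_mod */
	bfa_wc_wait(&enet->chld_stop_wc);	/* resumes at refcount zero */

	/* From bna_enet_cb_tx_stopped() / bna_enet_cb_rx_stopped(): */
	bfa_wc_down(&enet->chld_stop_wc);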
+
+/**
+ *
+ * Ethport
+ *
+ */
+
+struct bna_ethport {
+       bfa_fsm_t               fsm;
+       enum bna_ethport_flags flags;
+
+       enum bna_link_status link_status;
+
+       int                     rx_started_count;
+
+       void (*stop_cbfn)(struct bna_enet *);
+
+       void (*adminup_cbfn)(struct bnad *, enum bna_cb_status);
+
+       void (*link_cbfn)(struct bnad *, enum bna_link_status);
+
+       struct bfa_msgq_cmd_entry msgq_cmd;
+       union {
+               struct bfi_enet_enable_req admin_req;
+               struct bfi_enet_diag_lb_req lpbk_req;
+       } bfi_enet_cmd;
+
+       struct bna *bna;
+};
+
 /**
  *
  * Interrupt Block
        u32             doorbell_ack;
 };
 
-/* Interrupt timer configuration */
-struct bna_ib_config {
-       u8              coalescing_timeo;    /* Unit is 5usec. */
-
-       int                     interpkt_count;
-       int                     interpkt_timeo;
-
-       enum ib_flags ctrl_flags;
-};
-
 /* IB structure */
 struct bna_ib {
-       /* This should be the first one */
-       struct list_head                        qe;
-
-       int                     ib_id;
-
-       int                     ref_count;
-       int                     start_count;
-
        struct bna_dma_addr ib_seg_host_addr;
        void            *ib_seg_host_addr_kva;
-       u32             idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
-
-       struct bna_ibidx_seg *idx_seg;
 
        struct bna_ib_dbell door_bell;
 
-       struct bna_intr *intr;
-
-       struct bna_ib_config ib_config;
-
-       struct bna *bna;
-};
-
-/* IB module - keeps track of IBs and interrupts */
-struct bna_ib_mod {
-       struct bna_ib *ib;              /* BFI_MAX_IB entries */
-       struct bna_intr *intr;          /* BFI_MAX_IB entries */
-       struct bna_ibidx_seg *idx_seg;  /* BNA_IBIDX_TOTAL_SEGS */
-
-       struct list_head                        ib_free_q;
-
-       struct list_head                ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
+       enum bna_intr_type      intr_type;
+       int                     intr_vector;
 
-       struct list_head                        intr_free_q;
-       struct list_head                        intr_active_q;
+       u8                      coalescing_timeo;    /* Unit is 5usec. */
 
-       struct bna *bna;
+       int                     interpkt_count;
+       int                     interpkt_timeo;
 };
 
 /**
        /* Control path */
        struct bna_txq *txq;
        struct bnad *bnad;
+       void                    *priv; /* BNAD's cookie */
        enum bna_intr_type intr_type;
        int                     intr_vector;
        u8                      priority; /* Current priority */
        /* This should be the first one */
        struct list_head                        qe;
 
-       int                     txq_id;
-
        u8                      priority;
 
        struct bna_qpt qpt;
        struct bna_tcb *tcb;
-       struct bna_ib *ib;
-       int                     ib_seg_offset;
+       struct bna_ib ib;
 
        struct bna_tx *tx;
 
+       int                     hw_id;
+
        u64             tx_packets;
        u64             tx_bytes;
 };
 
-/* TxF structure (hardware Tx Function) */
-struct bna_txf {
-       int                     txf_id;
-       enum txf_flags ctrl_flags;
-       u16             vlan;
-};
-
 /* Tx object */
 struct bna_tx {
        /* This should be the first one */
        struct list_head                        qe;
+       int                     rid;
+       int                     hw_id;
 
        bfa_fsm_t               fsm;
        enum bna_tx_flags flags;
 
        enum bna_tx_type type;
+       int                     num_txq;
 
        struct list_head                        txq_q;
-       struct bna_txf txf;
+       u16                     txf_vlan_id;
 
        /* Tx event handlers */
        void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
        void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
-       void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
-       void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
-       void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+       void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
+       void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
+       void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
 
        /* callback for bna_tx_disable(), bna_tx_stop() */
-       void (*stop_cbfn)(void *arg, struct bna_tx *tx,
-                               enum bna_cb_status status);
+       void (*stop_cbfn)(void *arg, struct bna_tx *tx);
        void                    *stop_cbarg;
 
        /* callback for bna_tx_prio_set() */
-       void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
-                               enum bna_cb_status status);
+       void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
 
-       struct bfa_wc           txq_stop_wc;
-
-       struct bna_mbox_qe mbox_qe;
+       struct bfa_msgq_cmd_entry msgq_cmd;
+       union {
+               struct bfi_enet_tx_cfg_req      cfg_req;
+               struct bfi_enet_req             req;
+               struct bfi_enet_tx_cfg_rsp      cfg_rsp;
+       } bfi_enet_cmd;
 
        struct bna *bna;
        void                    *priv;  /* bnad's cookie */
 };
 
+/* Tx object configuration used during creation */
 struct bna_tx_config {
        int                     num_txq;
        int                     txq_depth;
+       int                     coalescing_timeo;
        enum bna_tx_type tx_type;
 };
 
        void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
        void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
        /* Mandatory */
-       void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
-       void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
-       void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+       void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
+       void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
+       void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
 };
 
 /* Tx module - keeps track of free, active tx objects */
        struct list_head                        txq_free_q;
 
        /* callback for bna_tx_mod_stop() */
-       void (*stop_cbfn)(struct bna_port *port,
-                               enum bna_cb_status status);
+       void (*stop_cbfn)(struct bna_enet *enet);
 
        struct bfa_wc           tx_stop_wc;
 
        enum bna_tx_mod_flags flags;
 
-       int                     priority;
-       int                     cee_link;
+       u8                      prio_map;
+       int                     default_prio;
+       int                     iscsi_over_cee;
+       int                     iscsi_prio;
+       int                     prio_reconfigured;
 
-       u32             txf_bmap[2];
+       u32                     rid_mask;
 
        struct bna *bna;
 };
        struct bna_rit_entry *rit;
 };
 
-struct bna_rit_mod {
-       struct bna_rit_entry *rit;
-       struct bna_rit_segment *rit_segment;
-
-       struct list_head                rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
-};
-
 /**
  *
  * Rx object
        int                     page_count;
        /* Control path */
        struct bna_rxq *rxq;
-       struct bna_cq *cq;
+       struct bna_ccb *ccb;
        struct bnad *bnad;
+       void                    *priv; /* BNAD's cookie */
        unsigned long           flags;
        int                     id;
 };
 /* RxQ structure - QPT, configuration */
 struct bna_rxq {
        struct list_head                        qe;
-       int                     rxq_id;
 
        int                     buffer_size;
        int                     q_depth;
        struct bna_rxp *rxp;
        struct bna_rx *rx;
 
+       int                     hw_id;
+
        u64             rx_packets;
        u64             rx_bytes;
        u64             rx_packets_with_error;
        /* Control path */
        struct bna_cq *cq;
        struct bnad *bnad;
+       void                    *priv; /* BNAD's cookie */
        enum bna_intr_type intr_type;
        int                     intr_vector;
        u8                      rx_coalescing_timeo; /* For NAPI */
 
 /* CQ QPT, configuration  */
 struct bna_cq {
-       int                     cq_id;
-
        struct bna_qpt qpt;
        struct bna_ccb *ccb;
 
-       struct bna_ib *ib;
-       u8                      ib_seg_offset;
+       struct bna_ib ib;
 
        struct bna_rx *rx;
 };
 
 struct bna_rss_config {
-       enum rss_hash_type hash_type;
+       enum bfi_enet_rss_type  hash_type;
        u8                      hash_mask;
-       u32             toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+       u32             toeplitz_hash_key[BFI_ENET_RSS_KEY_LEN];
 };
 
 struct bna_hds_config {
-       enum hds_header_type hdr_type;
-       int                     header_size;
+       enum bfi_enet_hds_type  hdr_type;
+       int                     forced_offset;
 };
 
-/* This structure is used during RX creation */
+/* Rx object configuration used during creation */
 struct bna_rx_config {
        enum bna_rx_type rx_type;
        int                     num_paths;
        enum bna_rxp_type rxp_type;
        int                     paused;
        int                     q_depth;
+       int                     coalescing_timeo;
        /*
         * Small/Large (or Header/Data) buffer size to be configured
         * for SLR and HDS queue type. Large buffer size comes from
-        * port->mtu.
+        * enet->mtu.
         */
        int                     small_buff_size;
 
        enum bna_status rss_status;
        struct bna_rss_config rss_config;
 
-       enum bna_status hds_status;
        struct bna_hds_config hds_config;
 
        enum bna_status vlan_strip_status;
 
        /* MSI-x vector number for configuring RSS */
        int                     vector;
-
-       struct bna_mbox_qe mbox_qe;
-};
-
-/* HDS configuration structure */
-struct bna_rxf_hds {
-       enum hds_header_type hdr_type;
-       int                     header_size;
-};
-
-/* RSS configuration structure */
-struct bna_rxf_rss {
-       enum rss_hash_type hash_type;
-       u8                      hash_mask;
-       u32             toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+       int                     hw_id;
 };
 
 /* RxF structure (hardware Rx Function) */
 struct bna_rxf {
        bfa_fsm_t               fsm;
-       int                     rxf_id;
-       enum rxf_flags ctrl_flags;
-       u16             default_vlan_tag;
-       enum bna_rxf_oper_state rxf_oper_state;
-       enum bna_status hds_status;
-       struct bna_rxf_hds hds_cfg;
-       enum bna_status rss_status;
-       struct bna_rxf_rss rss_cfg;
-       struct bna_rit_segment *rit_segment;
-       struct bna_rx *rx;
-       u32             forced_offset;
-       struct bna_mbox_qe mbox_qe;
-       int                     mcast_rxq_id;
+       enum bna_rxf_flags flags;
+
+       struct bfa_msgq_cmd_entry msgq_cmd;
+       union {
+               struct bfi_enet_enable_req req;
+               struct bfi_enet_rss_cfg_req rss_req;
+               struct bfi_enet_rit_req rit_req;
+               struct bfi_enet_rx_vlan_req vlan_req;
+               struct bfi_enet_mcast_add_req mcast_add_req;
+               struct bfi_enet_mcast_del_req mcast_del_req;
+               struct bfi_enet_ucast_req ucast_req;
+       } bfi_enet_cmd;
 
        /* callback for bna_rxf_start() */
-       void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+       void (*start_cbfn) (struct bna_rx *rx);
        struct bna_rx *start_cbarg;
 
        /* callback for bna_rxf_stop() */
-       void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+       void (*stop_cbfn) (struct bna_rx *rx);
        struct bna_rx *stop_cbarg;
 
-       /* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
-       void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
-                       enum bna_cb_status status);
+       /* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
+       void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
        struct bnad *oper_state_cbarg;
 
        /**
         *      bna_rxf_{ucast/mcast}_del(),
         *      bna_rxf_mode_set()
         */
-       void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
-                               enum bna_cb_status status);
+       void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx);
        struct bnad *cam_fltr_cbarg;
 
-       enum bna_rxf_flags rxf_flags;
-
        /* List of unicast addresses yet to be applied to h/w */
        struct list_head                        ucast_pending_add_q;
        struct list_head                        ucast_pending_del_q;
+       struct bna_mac *ucast_pending_mac;
        int                     ucast_pending_set;
        /* ucast addresses applied to the h/w */
        struct list_head                        ucast_active_q;
-       struct bna_mac *ucast_active_mac;
+       struct bna_mac ucast_active_mac;
+       int                     ucast_active_set;
 
        /* List of multicast addresses yet to be applied to h/w */
        struct list_head                        mcast_pending_add_q;
        struct list_head                        mcast_pending_del_q;
        /* multicast addresses applied to the h/w */
        struct list_head                        mcast_active_q;
+       struct list_head                        mcast_handle_q;
 
        /* Rx modes yet to be applied to h/w */
        enum bna_rxmode rxmode_pending;
        /* Rx modes applied to h/w */
        enum bna_rxmode rxmode_active;
 
+       u8                      vlan_pending_bitmask;
        enum bna_status vlan_filter_status;
-       u32             vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
+       u32             vlan_filter_table[BFI_ENET_VLAN_ID_MAX / 32];
+       bool                    vlan_strip_pending;
+       enum bna_status         vlan_strip_status;
+
+       enum bna_rss_flags      rss_pending;
+       enum bna_status         rss_status;
+       struct bna_rss_config   rss_cfg;
+       u8                      *rit;
+       int                     rit_size;
+
+       struct bna_rx           *rx;
 };
 
 /* Rx object */
 struct bna_rx {
        /* This should be the first one */
        struct list_head                        qe;
+       int                     rid;
+       int                     hw_id;
 
        bfa_fsm_t               fsm;
 
        enum bna_rx_type type;
 
-       /* list-head for RX path objects */
+       int                     num_paths;
        struct list_head                        rxp_q;
 
+       struct bna_hds_config   hds_cfg;
+
        struct bna_rxf rxf;
 
        enum bna_rx_flags rx_flags;
 
-       struct bna_mbox_qe mbox_qe;
-
-       struct bfa_wc           rxq_stop_wc;
+       struct bfa_msgq_cmd_entry msgq_cmd;
+       union {
+               struct bfi_enet_rx_cfg_req      cfg_req;
+               struct bfi_enet_req             req;
+               struct bfi_enet_rx_cfg_rsp      cfg_rsp;
+       } bfi_enet_cmd;
 
        /* Rx event handlers */
        void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
        void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
        void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
        void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
-       void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
-       void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+       void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
+       void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
 
        /* callback for bna_rx_disable(), bna_rx_stop() */
-       void (*stop_cbfn)(void *arg, struct bna_rx *rx,
-                               enum bna_cb_status status);
+       void (*stop_cbfn)(void *arg, struct bna_rx *rx);
        void                    *stop_cbarg;
 
        struct bna *bna;
        void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
        void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
        /* Mandatory */
-       void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
-       void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+       void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
+       void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
 };
 
 /* Rx module - keeps track of free, active rx objects */
        enum bna_rx_mod_flags flags;
 
        /* callback for bna_rx_mod_stop() */
-       void (*stop_cbfn)(struct bna_port *port,
-                               enum bna_cb_status status);
+       void (*stop_cbfn)(struct bna_enet *enet);
 
        struct bfa_wc           rx_stop_wc;
        u32             dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
-       u32             rxf_bmap[2];
+       u32             rid_mask;
 };
 
 /**
        struct bna *bna;
 };
 
+struct bna_mcam_handle {
+       /* This should be the first one */
+       struct list_head                        qe;
+       int                     handle;
+       int                     refcnt;
+};
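The handle is refcounted so several active multicast MACs can share one firmware CAM entry; the handle pointer added to struct bna_mac above ties the two together. A sketch under that assumption, using the get/put helpers declared earlier in this patch (fw_handle stands in for the id the firmware returns):

	/* First user: take a handle from the pool and bind the fw id. */
	if (mac->handle == NULL) {
		mac->handle = bna_mcam_mod_handle_get(&bna->mcam_mod);
		mac->handle->handle = fw_handle;
	}
	mac->handle->refcnt++;

	/* Last user: return the handle to free_handle_q. */
	if (--mac->handle->refcnt == 0) {
		bna_mcam_mod_handle_put(&bna->mcam_mod, mac->handle);
		mac->handle = NULL;
	}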
+
 struct bna_mcam_mod {
        struct bna_mac *mcmac;          /* BFI_MAX_MCMAC entries */
+       struct bna_mcam_handle *mchandle;       /* BFI_MAX_MCMAC entries */
        struct list_head                        free_q;
+       struct list_head                        free_handle_q;
 
        struct bna *bna;
 };
        int                     num_active_mcast;
        int                     rxmode_active;
        int                     vlan_filter_status;
-       u32             vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
        int                     rss_status;
        int                     hds_status;
 };
        int                     priority;
        int                     num_active_tx;
        int                     num_active_rx;
-       struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
-       struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
 };
 
 struct bna_stats {
-       u32             txf_bmap[2];
-       u32             rxf_bmap[2];
-       struct bfi_ll_stats     *hw_stats;
-       struct bna_sw_stats *sw_stats;
+       struct bna_dma_addr     hw_stats_dma;
+       struct bfi_enet_stats   *hw_stats_kva;
+       struct bfi_enet_stats   hw_stats;
+};
+
+struct bna_stats_mod {
+       bool            ioc_ready;
+       bool            stats_get_busy;
+       bool            stats_clr_busy;
+       struct bfa_msgq_cmd_entry stats_get_cmd;
+       struct bfa_msgq_cmd_entry stats_clr_cmd;
+       struct bfi_enet_stats_req stats_get;
+       struct bfi_enet_stats_req stats_clr;
 };
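Hardware stats now arrive over the msgq into the DMA buffer (hw_stats_kva/hw_stats_dma) and are snapshotted into the embedded hw_stats so readers see a stable copy. A hedged sketch of the response side, ignoring the per-field endian conversion a real handler would need:

	/* Sketch: publish a stable snapshot, release the busy flag, and
	 * notify the driver; bnad_netdev_hwstats_fill() reads hw_stats. */
	memcpy(&bna->stats.hw_stats, bna->stats.hw_stats_kva,
	       sizeof(bna->stats.hw_stats));
	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);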
 
 /**
  */
 
 struct bna {
+       struct bna_ident ident;
        struct bfa_pcidev pcidev;
 
-       int                     port_num;
+       struct bna_reg regs;
+       struct bna_bit_defn bits;
 
-       struct bna_chip_regs regs;
-
-       struct bna_dma_addr hw_stats_dma;
        struct bna_stats stats;
 
-       struct bna_device device;
+       struct bna_ioceth ioceth;
        struct bfa_cee cee;
+       struct bfa_msgq msgq;
 
-       struct bna_mbox_mod mbox_mod;
-
-       struct bna_port port;
+       struct bna_ethport ethport;
+       struct bna_enet enet;
+       struct bna_stats_mod stats_mod;
 
        struct bna_tx_mod tx_mod;
-
        struct bna_rx_mod rx_mod;
-
-       struct bna_ib_mod ib_mod;
-
        struct bna_ucam_mod ucam_mod;
        struct bna_mcam_mod mcam_mod;
 
-       struct bna_rit_mod rit_mod;
-
-       int                     rxf_promisc_id;
+       enum bna_mod_flags mod_flags;
 
-       struct bna_mbox_qe mbox_qe;
+       int                     default_mode_rid;
+       int                     promisc_rid;
 
        struct bnad *bnad;
 };
-
 #endif /* __BNA_TYPES_H__ */
 
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;
        u32 flags, unmap_cons;
-       u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
        struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+       struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+
+       set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
 
-       if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
+       if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
+               clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
                return 0;
+       }
 
        prefetch(bnad->netdev);
        BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
                packets++;
                BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 
-               if (qid0 == cmpl->rxq_id)
-                       rcb = ccb->rcb[0];
-               else
+               if (bna_is_small_rxq(cmpl->rxq_id))
                        rcb = ccb->rcb[1];
+               else
+                       rcb = ccb->rcb[0];
 
                unmap_q = rcb->unmap_q;
                unmap_array = unmap_q->unmap_array;
                if (flags & BNA_CQ_EF_VLAN)
                        __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
 
-               if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-                       struct bnad_rx_ctrl *rx_ctrl;
-
-                       rx_ctrl = (struct bnad_rx_ctrl *) ccb->ctrl;
+               if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        napi_gro_receive(&rx_ctrl->napi, skb);
-               } else {
+               else {
                        netif_receive_skb(skb);
                }
 
                        bna_ib_ack(ccb->i_dbell, 0);
        }
 
+       clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+
        return packets;
 }
 
 
        bna_intr_status_get(&bnad->bna, intr_status);
 
-       if (BNA_IS_MBOX_ERR_INTR(intr_status))
+       if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
                bna_mbox_handler(&bnad->bna, intr_status);
 
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        struct bnad *bnad = (struct bnad *)data;
        struct bnad_rx_info *rx_info;
        struct bnad_rx_ctrl *rx_ctrl;
+       struct bna_tcb *tcb = NULL;
 
        if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
                return IRQ_NONE;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
 
-       if (BNA_IS_MBOX_ERR_INTR(intr_status))
+       if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
                bna_mbox_handler(&bnad->bna, intr_status);
 
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        /* Process data interrupts */
        /* Tx processing */
        for (i = 0; i < bnad->num_tx; i++) {
-               for (j = 0; j < bnad->num_txq_per_tx; j++)
-                       bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+               for (j = 0; j < bnad->num_txq_per_tx; j++) {
+                       tcb = bnad->tx_info[i].tcb[j];
+                       if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+                               bnad_tx(bnad, tcb);
+               }
        }
        /* Rx processing */
        for (i = 0; i < bnad->num_rx; i++) {
 
 /* Callbacks */
 void
-bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
+bnad_cb_mbox_intr_enable(struct bnad *bnad)
 {
        bnad_enable_mbox_irq(bnad);
 }
 
 void
-bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
+bnad_cb_mbox_intr_disable(struct bnad *bnad)
 {
        bnad_disable_mbox_irq(bnad);
 }
 
 void
-bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
+bnad_cb_ioceth_ready(struct bnad *bnad)
+{
+       bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
+       complete(&bnad->bnad_completions.ioc_comp);
+}
+
+void
+bnad_cb_ioceth_failed(struct bnad *bnad)
 {
+       bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
        complete(&bnad->bnad_completions.ioc_comp);
-       bnad->bnad_completions.ioc_comp_status = status;
 }
 
 void
-bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
+bnad_cb_ioceth_disabled(struct bnad *bnad)
 {
+       bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
        complete(&bnad->bnad_completions.ioc_comp);
-       bnad->bnad_completions.ioc_comp_status = status;
 }
 
 static void
-bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
+bnad_cb_enet_disabled(void *arg)
 {
        struct bnad *bnad = (struct bnad *)arg;
 
-       complete(&bnad->bnad_completions.port_comp);
-
        netif_carrier_off(bnad->netdev);
+       complete(&bnad->bnad_completions.enet_comp);
 }
 
 void
-bnad_cb_port_link_status(struct bnad *bnad,
+bnad_cb_ethport_link_status(struct bnad *bnad,
                        enum bna_link_status link_status)
 {
        bool link_up = 0;
        link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
 
        if (link_status == BNA_CEE_UP) {
+               if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
+                       BNAD_UPDATE_CTR(bnad, cee_toggle);
                set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
-               BNAD_UPDATE_CTR(bnad, cee_up);
-       } else
+       } else {
+               if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
+                       BNAD_UPDATE_CTR(bnad, cee_toggle);
                clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+       }
 
        if (link_up) {
                if (!netif_carrier_ok(bnad->netdev)) {
-                       struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
-                       if (!tcb)
-                               return;
-                       pr_warn("bna: %s link up\n",
+                       uint tx_id, tcb_id;
+                       printk(KERN_WARNING "bna: %s link up\n",
                                bnad->netdev->name);
                        netif_carrier_on(bnad->netdev);
                        BNAD_UPDATE_CTR(bnad, link_toggle);
-                       if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
-                               /* Force an immediate Transmit Schedule */
-                               pr_info("bna: %s TX_STARTED\n",
-                                       bnad->netdev->name);
-                               netif_wake_queue(bnad->netdev);
-                               BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
-                       } else {
-                               netif_stop_queue(bnad->netdev);
-                               BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+                       for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
+                               for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
+                                     tcb_id++) {
+                                       struct bna_tcb *tcb =
+                                       bnad->tx_info[tx_id].tcb[tcb_id];
+                                       u32 txq_id;
+                                       if (!tcb)
+                                               continue;
+
+                                       txq_id = tcb->id;
+
+                                       if (test_bit(BNAD_TXQ_TX_STARTED,
+                                                    &tcb->flags)) {
+                                               /* Force an immediate
+                                                * Transmit Schedule
+                                                */
+                                               printk(KERN_INFO "bna: %s %d "
+                                                     "TXQ_STARTED\n",
+                                                      bnad->netdev->name,
+                                                      txq_id);
+                                               netif_wake_subqueue(
+                                                               bnad->netdev,
+                                                               txq_id);
+                                               BNAD_UPDATE_CTR(bnad,
+                                                       netif_queue_wakeup);
+                                       } else {
+                                               netif_stop_subqueue(
+                                                               bnad->netdev,
+                                                               txq_id);
+                                               BNAD_UPDATE_CTR(bnad,
+                                                       netif_queue_stop);
+                                       }
+                               }
                        }
                }
        } else {
                if (netif_carrier_ok(bnad->netdev)) {
-                       pr_warn("bna: %s link down\n",
+                       printk(KERN_WARNING "bna: %s link down\n",
                                bnad->netdev->name);
                        netif_carrier_off(bnad->netdev);
                        BNAD_UPDATE_CTR(bnad, link_toggle);
 }
 
 static void
-bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
-                       enum bna_cb_status status)
+bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
 {
        struct bnad *bnad = (struct bnad *)arg;
 
 }
 
 static void
-bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
 {
        struct bnad_tx_info *tx_info =
-                       (struct bnad_tx_info *)tcb->txq->tx->priv;
-
-       if (tx_info != &bnad->tx_info[0])
-               return;
+                       (struct bnad_tx_info *)tx->priv;
+       struct bna_tcb *tcb;
+       u32 txq_id;
+       int i;
 
-       clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
-       netif_stop_queue(bnad->netdev);
-       pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
+       for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+               tcb = tx_info->tcb[i];
+               if (!tcb)
+                       continue;
+               txq_id = tcb->id;
+               clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+               netif_stop_subqueue(bnad->netdev, txq_id);
+               printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
+                       bnad->netdev->name, txq_id);
+       }
 }
 
 static void
-bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
 {
-       struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+       struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+       struct bna_tcb *tcb;
+       struct bnad_unmap_q *unmap_q;
+       u32 txq_id;
+       int i;
 
-       if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
-               return;
+       for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+               tcb = tx_info->tcb[i];
+               if (!tcb)
+                       continue;
+               txq_id = tcb->id;
 
-       clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
+               unmap_q = tcb->unmap_q;
 
-       while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-               cpu_relax();
+               if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+                       continue;
 
-       bnad_free_all_txbufs(bnad, tcb);
+               while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+                       cpu_relax();
 
-       unmap_q->producer_index = 0;
-       unmap_q->consumer_index = 0;
+               bnad_free_all_txbufs(bnad, tcb);
 
-       smp_mb__before_clear_bit();
-       clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+               unmap_q->producer_index = 0;
+               unmap_q->consumer_index = 0;
+
+               smp_mb__before_clear_bit();
+               clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+               set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+
+               if (netif_carrier_ok(bnad->netdev)) {
+                       printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
+                               bnad->netdev->name, txq_id);
+                       netif_wake_subqueue(bnad->netdev, txq_id);
+                       BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+               }
+       }
 
        /*
-        * Workaround for first device enable failure & we
+        * Workaround for first ioceth enable failure when we
         * get a 0 MAC address. We try to get the MAC address
         * again here.
         */
        if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
-               bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
+               bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
                bnad_set_netdev_perm_addr(bnad);
        }
-
-       set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
-
-       if (netif_carrier_ok(bnad->netdev)) {
-               pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
-               netif_wake_queue(bnad->netdev);
-               BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
-       }
 }
 
 static void
-bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
 {
-       /* Delay only once for the whole Tx Path Shutdown */
-       if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
-               mdelay(BNAD_TXRX_SYNC_MDELAY);
+       mdelay(BNAD_TXRX_SYNC_MDELAY);
+       bna_tx_cleanup_complete(tx);
 }
 
 static void
-bnad_cb_rx_cleanup(struct bnad *bnad,
-                       struct bna_ccb *ccb)
+bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 {
-       clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+       struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+       struct bna_ccb *ccb;
+       struct bnad_rx_ctrl *rx_ctrl;
+       int i;
+
+       mdelay(BNAD_TXRX_SYNC_MDELAY);
+
+       for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+               rx_ctrl = &rx_info->rx_ctrl[i];
+               ccb = rx_ctrl->ccb;
+               if (!ccb)
+                       continue;
+
+               clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+
+               if (ccb->rcb[1])
+                       clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
 
-       if (ccb->rcb[1])
-               clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
+               while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
+                       cpu_relax();
+       }
 
-       if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
-               mdelay(BNAD_TXRX_SYNC_MDELAY);
+       bna_rx_cleanup_complete(rx);
 }
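Together with the poll path above, this forms a two-flag handshake: NAPI brackets its work with BNAD_FP_IN_RX_PATH, while cleanup clears BNAD_RXQ_STARTED first (so new polls bail out) and then spins until no poll is in flight. A self-contained userspace analogue of the protocol, for illustration only:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <sched.h>

	static atomic_bool rxq_started = true;	/* BNAD_RXQ_STARTED analogue */
	static atomic_bool fp_in_rx_path;	/* BNAD_FP_IN_RX_PATH analogue */

	static int poll_rx(void)
	{
		atomic_store(&fp_in_rx_path, true);
		if (!atomic_load(&rxq_started)) {
			atomic_store(&fp_in_rx_path, false);
			return 0;		/* queue stopped; do nothing */
		}
		/* ... consume completions ... */
		atomic_store(&fp_in_rx_path, false);
		return 1;
	}

	static void rx_cleanup(void)
	{
		atomic_store(&rxq_started, false);	/* stop new polls */
		while (atomic_load(&fp_in_rx_path))	/* drain in-flight */
			sched_yield();			/* cpu_relax() analogue */
		/* safe to tear the queues down here */
	}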
 
 static void
-bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
 {
-       struct bnad_unmap_q *unmap_q = rcb->unmap_q;
-
-       clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
-
-       if (rcb == rcb->cq->ccb->rcb[0])
-               bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
+       struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+       struct bna_ccb *ccb;
+       struct bna_rcb *rcb;
+       struct bnad_rx_ctrl *rx_ctrl;
+       struct bnad_unmap_q *unmap_q;
+       int i;
+       int j;
 
-       bnad_free_all_rxbufs(bnad, rcb);
+       for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+               rx_ctrl = &rx_info->rx_ctrl[i];
+               ccb = rx_ctrl->ccb;
+               if (!ccb)
+                       continue;
 
-       set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+               bnad_cq_cmpl_init(bnad, ccb);
 
-       /* Now allocate & post buffers for this RCB */
-       /* !!Allocation in callback context */
-       if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
-               if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
-                        >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
-                       bnad_alloc_n_post_rxbufs(bnad, rcb);
-               smp_mb__before_clear_bit();
-               clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+               for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
+                       rcb = ccb->rcb[j];
+                       if (!rcb)
+                               continue;
+                       bnad_free_all_rxbufs(bnad, rcb);
+
+                       set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+                       unmap_q = rcb->unmap_q;
+
+                       /* Now allocate & post buffers for this RCB */
+                       /* !!Allocation in callback context */
+                       if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+                               if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+                                       >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+                                       bnad_alloc_n_post_rxbufs(bnad, rcb);
+                               smp_mb__before_clear_bit();
+                               clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+                       }
+               }
        }
 }
 
 static void
-bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
-                       enum bna_cb_status status)
+bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
 {
        struct bnad *bnad = (struct bnad *)arg;
 
 }
 
 static void
-bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
-                               enum bna_cb_status status)
+bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
 {
-       bnad->bnad_completions.mcast_comp_status = status;
+       bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
        complete(&bnad->bnad_completions.mcast_comp);
 }
 
                  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
 }
 
+static void
+bnad_cb_enet_mtu_set(struct bnad *bnad)
+{
+       bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
+       complete(&bnad->bnad_completions.mtu_comp);
+}
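This is the completion half of a synchronous MTU change; a hedged sketch of the caller half follows, mirroring the ioceth enable/disable pattern below (the wrapper name bnad_mtu_set and the mtu_comp/mtu_comp_status pairing are assumptions):

	static int
	bnad_mtu_set(struct bnad *bnad, int mtu)
	{
		unsigned long flags;

		init_completion(&bnad->bnad_completions.mtu_comp);

		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		wait_for_completion(&bnad->bnad_completions.mtu_comp);

		return bnad->bnad_completions.mtu_comp_status;
	}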
+
 /* Resource allocation, free functions */
 
 static void
 
 /* Free IRQ for Mailbox */
 static void
-bnad_mbox_irq_free(struct bnad *bnad,
-                  struct bna_intr_info *intr_info)
+bnad_mbox_irq_free(struct bnad *bnad)
 {
        int irq;
        unsigned long flags;
 
-       if (intr_info->idl == NULL)
-               return;
-
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bnad_disable_mbox_irq(bnad);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        irq = BNAD_GET_MBOX_IRQ(bnad);
        free_irq(irq, bnad);
-
-       kfree(intr_info->idl);
 }
 
 /*
  * from bna
  */
 static int
-bnad_mbox_irq_alloc(struct bnad *bnad,
-                   struct bna_intr_info *intr_info)
+bnad_mbox_irq_alloc(struct bnad *bnad)
 {
        int             err = 0;
        unsigned long   irq_flags, flags;
        u32     irq;
        irq_handler_t   irq_handler;
 
-       /* Mbox should use only 1 vector */
-
-       intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
-       if (!intr_info->idl)
-               return -ENOMEM;
-
        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (bnad->cfg_flags & BNAD_CF_MSIX) {
                irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
                irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
                irq_flags = 0;
-               intr_info->intr_type = BNA_INTR_T_MSIX;
-               intr_info->idl[0].vector = BNAD_MAILBOX_MSIX_INDEX;
        } else {
                irq_handler = (irq_handler_t)bnad_isr;
                irq = bnad->pcidev->irq;
                irq_flags = IRQF_SHARED;
-               intr_info->intr_type = BNA_INTR_T_INTX;
        }
 
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        err = request_irq(irq, irq_handler, irq_flags,
                          bnad->mbox_irq_name, bnad);
 
-       if (err) {
-               kfree(intr_info->idl);
-               intr_info->idl = NULL;
-       }
-
        return err;
 }
 
 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
 static int
 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
-                   uint txrx_id, struct bna_intr_info *intr_info)
+                   u32 txrx_id, struct bna_intr_info *intr_info)
 {
        int i, vector_start = 0;
        u32 cfg_flags;
  */
 static int
 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
-                       uint tx_id, int num_txqs)
+                       u32 tx_id, int num_txqs)
 {
        int i;
        int err;
  */
 static int
 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
-                       uint rx_id, int num_rxps)
+                       u32 rx_id, int num_rxps)
 {
        int i;
        int err;
 /* Allocates memory and interrupt resources for Tx object */
 static int
 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
-                 uint tx_id)
+                 u32 tx_id)
 {
        int i, err = 0;
 
        unsigned long flags;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
+       bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
        unsigned long flags;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
+       bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
        unsigned long flags;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
+       bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
        unsigned long flags;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
+       bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
                return;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bna_stats_get(&bnad->bna);
+       bna_hw_stats_get(&bnad->bna);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
 
 /* Should be held with conf_lock held */
 void
-bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
+bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
 {
        struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
        struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        tx_info->tx = NULL;
+       tx_info->tx_id = 0;
 
        if (0 == tx_id)
                tasklet_kill(&bnad->tx_free_tasklet);
 
 /* Should be held with conf_lock held */
 int
-bnad_setup_tx(struct bnad *bnad, uint tx_id)
+bnad_setup_tx(struct bnad *bnad, u32 tx_id)
 {
        int err;
        struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
        struct bna_tx *tx;
        unsigned long flags;
 
+       tx_info->tx_id = tx_id;
+
        /* Initialize the Tx object configuration */
        tx_config->num_txq = bnad->num_txq_per_tx;
        tx_config->txq_depth = bnad->txq_depth;
        tx_config->tx_type = BNA_TX_T_REGULAR;
+       tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
 
        /* Initialize the tx event handlers */
        tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
 {
        rx_config->rx_type = BNA_RX_T_REGULAR;
        rx_config->num_paths = bnad->num_rxp_per_rx;
+       rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
 
        if (bnad->num_rxp_per_rx > 1) {
                rx_config->rss_status = BNA_STATUS_T_ENABLED;
                rx_config->rss_config.hash_type =
-                               (BFI_RSS_T_V4_TCP |
-                                BFI_RSS_T_V6_TCP |
-                                BFI_RSS_T_V4_IP  |
-                                BFI_RSS_T_V6_IP);
+                               (BFI_ENET_RSS_IPV6 |
+                                BFI_ENET_RSS_IPV6_TCP |
+                                BFI_ENET_RSS_IPV4 |
+                                BFI_ENET_RSS_IPV4_TCP);
                rx_config->rss_config.hash_mask =
                                bnad->num_rxp_per_rx - 1;
                get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
 
 /* Called with mutex_lock(&bnad->conf_mutex) held */
 void
-bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
+bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
 {
        struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
        struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
 
 /* Called with mutex_lock(&bnad->conf_mutex) held */
 int
-bnad_setup_rx(struct bnad *bnad, uint rx_id)
+bnad_setup_rx(struct bnad *bnad, u32 rx_id)
 {
        int err;
        struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
        struct bna_rx *rx;
        unsigned long flags;
 
+       rx_info->rx_id = rx_id;
+
        /* Initialize the Rx object configuration */
        bnad_init_rx_config(bnad, rx_config);
 
        u16 vid;
        unsigned long flags;
 
-       BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
+       BUG_ON(VLAN_N_VID != BFI_ENET_VLAN_ID_MAX);
 
        for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
                spin_lock_irqsave(&bnad->bna_lock, flags);
 void
 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
 {
-       struct bfi_ll_stats_mac *mac_stats;
-       u64 bmap;
+       struct bfi_enet_stats_mac *mac_stats;
+       u32 bmap;
        int i;
 
-       mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
+       mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
        stats->rx_errors =
                mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
                mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
        stats->rx_crc_errors = mac_stats->rx_fcs_error;
        stats->rx_frame_errors = mac_stats->rx_alignment_error;
        /* receiver FIFO overrun */
-       bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
-               ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
-       for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+       bmap = bna_rx_rid_mask(&bnad->bna);
+       for (i = 0; bmap; i++) {
                if (bmap & 1) {
                        stats->rx_fifo_errors +=
                                bnad->stats.bna_stats->
-                                       hw_stats->rxf_stats[i].frame_drops;
+                                       hw_stats.rxf_stats[i].frame_drops;
                        break;
                }
                bmap >>= 1;
  * Called with bnad->bna_lock held because of cfg_flags access
  */
 static void
-bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
+bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
 {
        bnad->num_txq_per_tx = 1;
        if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
                bnad->num_rxp_per_rx = 1;
 }
 
-/* Enable / disable device */
-static void
-bnad_device_disable(struct bnad *bnad)
+/* Enable / disable ioceth */
+static int
+bnad_ioceth_disable(struct bnad *bnad)
 {
        unsigned long flags;
-
-       init_completion(&bnad->bnad_completions.ioc_comp);
+       int err = 0;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
+       init_completion(&bnad->bnad_completions.ioc_comp);
+       bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-       wait_for_completion(&bnad->bnad_completions.ioc_comp);
+       wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
+               msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
+
+       err = bnad->bnad_completions.ioc_comp_status;
+       return err;
 }
 
 static int
-bnad_device_enable(struct bnad *bnad)
+bnad_ioceth_enable(struct bnad *bnad)
 {
        int err = 0;
        unsigned long flags;
 
-       init_completion(&bnad->bnad_completions.ioc_comp);
-
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bna_device_enable(&bnad->bna.device);
+       init_completion(&bnad->bnad_completions.ioc_comp);
+       bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
+       bna_ioceth_enable(&bnad->bna.ioceth);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-       wait_for_completion(&bnad->bnad_completions.ioc_comp);
+       wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
+               msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
 
-       if (bnad->bnad_completions.ioc_comp_status)
-               err = bnad->bnad_completions.ioc_comp_status;
+       err = bnad->bnad_completions.ioc_comp_status;
 
        return err;
 }
 
 /* Free BNA resources */
 static void
-bnad_res_free(struct bnad *bnad)
+bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
+               u32 res_val_max)
 {
        int i;
-       struct bna_res_info *res_info = &bnad->res_info[0];
 
-       for (i = 0; i < BNA_RES_T_MAX; i++) {
-               if (res_info[i].res_type == BNA_RES_T_MEM)
-                       bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
-               else
-                       bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
-       }
+       for (i = 0; i < res_val_max; i++)
+               bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
 }
 
 /* Allocates memory and interrupt resources for BNA */
 static int
-bnad_res_alloc(struct bnad *bnad)
+bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+               u32 res_val_max)
 {
        int i, err;
-       struct bna_res_info *res_info = &bnad->res_info[0];
 
-       for (i = 0; i < BNA_RES_T_MAX; i++) {
-               if (res_info[i].res_type == BNA_RES_T_MEM)
-                       err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
-               else
-                       err = bnad_mbox_irq_alloc(bnad,
-                                                 &res_info[i].res_u.intr_info);
+       for (i = 0; i < res_val_max; i++) {
+               err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
                if (err)
                        goto err_return;
        }
        return 0;
 
 err_return:
-       bnad_res_free(bnad);
+       bnad_res_free(bnad, res_info, res_val_max);
        return err;
 }
 
 
                spin_lock_irqsave(&bnad->bna_lock, flags);
                /* ret = #of vectors that we got */
-               bnad_q_num_adjust(bnad, ret);
+               bnad_q_num_adjust(bnad, ret, 0);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
                bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
                        * bnad->num_rxp_per_rx) +
                         BNAD_MAILBOX_MSIX_VECTORS;
 
+               if (bnad->msix_num > ret)
+                       goto intx_mode;
+
                /* Try once more with adjusted numbers */
                /* If this fails, fall back to INTx */
                ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
 
        } else if (ret < 0)
                goto intx_mode;
+
+       pci_intx(bnad->pcidev, 0);
+
        return;
 
 intx_mode:
        pause_config.tx_pause = 0;
        pause_config.rx_pause = 0;
 
-       mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
+       mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
-       bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
-       bna_port_enable(&bnad->bna.port);
+       bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+       bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+       bna_enet_enable(&bnad->bna.enet);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        /* Enable broadcast */
        /* Stop the stats timer */
        bnad_stats_timer_stop(bnad);
 
-       init_completion(&bnad->bnad_completions.port_comp);
+       init_completion(&bnad->bnad_completions.enet_comp);
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
-                       bnad_cb_port_disabled);
+       bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
+                       bnad_cb_enet_disabled);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-       wait_for_completion(&bnad->bnad_completions.port_comp);
+       wait_for_completion(&bnad->bnad_completions.enet_comp);
 
        bnad_cleanup_tx(bnad, 0);
        bnad_cleanup_rx(bnad, 0);
 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct bnad *bnad = netdev_priv(netdev);
+       u32 txq_id = 0;
+       struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
 
        u16             txq_prod, vlan_tag = 0;
        u32             unmap_prod, wis, wis_used, wi_range;
        u32             vectors, vect_id, i, acked;
-       u32             tx_id;
        int                     err;
 
-       struct bnad_tx_info *tx_info;
-       struct bna_tcb *tcb;
-       struct bnad_unmap_q *unmap_q;
+       struct bnad_unmap_q *unmap_q = tcb->unmap_q;
        dma_addr_t              dma_addr;
        struct bna_txq_entry *txqent;
-       bna_txq_wi_ctrl_flag_t  flags;
+       u16     flags;
 
        if (unlikely
            (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
                return NETDEV_TX_OK;
        }
 
-       tx_id = 0;
-
-       tx_info = &bnad->tx_info[tx_id];
-       tcb = tx_info->tcb[tx_id];
-       unmap_q = tcb->unmap_q;
-
        /*
         * Takes care of the Tx that is scheduled between clearing the flag
-        * and the netif_stop_queue() call.
+        * and the netif_tx_stop_all_queues() call.
         */
        if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
                dev_kfree_skb(skb);
        }
        wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
        acked = 0;
-       if (unlikely
-           (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
-            vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+       if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+                       vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
                if ((u16) (*tcb->hw_consumer_index) !=
                    tcb->consumer_index &&
                    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
-               u32             size = frag->size;
+               u16             size = frag->size;
 
                if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
                        vect_id = 0;
 }
 
 static int
-bnad_change_mtu(struct net_device *netdev, int new_mtu)
+bnad_mtu_set(struct bnad *bnad, int mtu)
 {
-       int mtu, err = 0;
        unsigned long flags;
 
+       init_completion(&bnad->bnad_completions.mtu_comp);
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       wait_for_completion(&bnad->bnad_completions.mtu_comp);
+
+       return bnad->bnad_completions.mtu_comp_status;
+}
+
+static int
+bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       int err, mtu = netdev->mtu;
        struct bnad *bnad = netdev_priv(netdev);
 
        if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
 
        netdev->mtu = new_mtu;
 
-       mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
-
-       spin_lock_irqsave(&bnad->bna_lock, flags);
-       bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
-       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
+       err = bnad_mtu_set(bnad, mtu);
+       if (err)
+               err = -EBUSY;
 
        mutex_unlock(&bnad->conf_mutex);
        return err;
 
 /*
  * Initialize locks
-       a) Per device mutes used for serializing configuration
+       a) Per ioceth mutex used for serializing configuration
           changes from OS interface
        b) spin lock used to protect bna state machine
  */
         */
        netdev = alloc_etherdev(sizeof(struct bnad));
        if (!netdev) {
-               dev_err(&pdev->dev, "alloc_etherdev failed\n");
+               dev_err(&pdev->dev, "netdev allocation failed\n");
                err = -ENOMEM;
                return err;
        }
        bnad = netdev_priv(netdev);
 
+       bnad_lock_init(bnad);
+
+       mutex_lock(&bnad->conf_mutex);
        /*
         * PCI initialization
         *      Output : using_dac = 1 for 64 bit DMA
        if (err)
                goto free_netdev;
 
-       bnad_lock_init(bnad);
        /*
         * Initialize bnad structure
         * Setup relation between pci_dev & netdev
        err = bnad_init(bnad, pdev, netdev);
        if (err)
                goto pci_uninit;
+
        /* Initialize netdev structure, set up ethtool ops */
        bnad_netdev_init(bnad, using_dac);
 
        /* Set link to down state */
        netif_carrier_off(netdev);
 
-       bnad_enable_msix(bnad);
-
        /* Get resource requirement form bna */
+       spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_res_req(&bnad->res_info[0]);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        /* Allocate resources from bna */
-       err = bnad_res_alloc(bnad);
+       err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
        if (err)
-               goto free_netdev;
+               goto drv_uninit;
 
        bna = &bnad->bna;
 
        pcidev_info.device_id = bnad->pcidev->device;
        pcidev_info.pci_bar_kva = bnad->bar0;
 
-       mutex_lock(&bnad->conf_mutex);
-
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        bnad->stats.bna_stats = &bna->stats;
 
+       bnad_enable_msix(bnad);
+       err = bnad_mbox_irq_alloc(bnad);
+       if (err)
+               goto res_free;
+
        /* Set up timers */
-       setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
+       setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
                                ((unsigned long)bnad));
-       setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
+       setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
                                ((unsigned long)bnad));
-       setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
+       setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
                                ((unsigned long)bnad));
-       setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
+       setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
                                ((unsigned long)bnad));
 
        /* Now start the timer before calling IOC */
-       mod_timer(&bnad->bna.device.ioc.iocpf_timer,
+       mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
                  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
 
        /*
         * Start the chip
-        * Don't care even if err != 0, bna state machine will
-        * deal with it
+        * If the callback returns an error, we bail out.
+        * This is a catastrophic error.
         */
-       err = bnad_device_enable(bnad);
+       err = bnad_ioceth_enable(bnad);
+       if (err) {
+               pr_err("BNA: Initialization failed err=%d\n",
+                      err);
+               goto probe_success;
+       }
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
+               bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
+               bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
+                       bna_attr(bna)->num_rxp - 1);
+               if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
+                       bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
+                       err = -EIO;
+       }
+       bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+       if (err)
+               goto disable_ioceth;
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        /* Get the burnt-in mac */
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bna_port_mac_get(&bna->port, &bnad->perm_addr);
+       bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
        bnad_set_netdev_perm_addr(bnad);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-       mutex_unlock(&bnad->conf_mutex);
-
        /* Finally, register with net_device layer */
        err = register_netdev(netdev);
        if (err) {
                pr_err("BNA : Registering with netdev failed\n");
-               goto disable_device;
+               goto probe_uninit;
        }
+       set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
 
+probe_success:
+       mutex_unlock(&bnad->conf_mutex);
        return 0;
 
-disable_device:
-       mutex_lock(&bnad->conf_mutex);
-       bnad_device_disable(bnad);
-       del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
-       del_timer_sync(&bnad->bna.device.ioc.sem_timer);
-       del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+probe_uninit:
+       bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+disable_ioceth:
+       bnad_ioceth_disable(bnad);
+       del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+       del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
+       del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_uninit(bna);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
-       mutex_unlock(&bnad->conf_mutex);
-
-       bnad_res_free(bnad);
+       bnad_mbox_irq_free(bnad);
        bnad_disable_msix(bnad);
+res_free:
+       bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
+drv_uninit:
+       bnad_uninit(bnad);
 pci_uninit:
        bnad_pci_uninit(pdev);
+       mutex_unlock(&bnad->conf_mutex);
        bnad_lock_uninit(bnad);
-       bnad_uninit(bnad);
 free_netdev:
        free_netdev(netdev);
        return err;
        bnad = netdev_priv(netdev);
        bna = &bnad->bna;
 
-       unregister_netdev(netdev);
+       if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
+               unregister_netdev(netdev);
 
        mutex_lock(&bnad->conf_mutex);
-       bnad_device_disable(bnad);
-       del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
-       del_timer_sync(&bnad->bna.device.ioc.sem_timer);
-       del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+       bnad_ioceth_disable(bnad);
+       del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+       del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
+       del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_uninit(bna);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
-       mutex_unlock(&bnad->conf_mutex);
 
-       bnad_res_free(bnad);
+       bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+       bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
+       bnad_mbox_irq_free(bnad);
        bnad_disable_msix(bnad);
        bnad_pci_uninit(pdev);
+       mutex_unlock(&bnad->conf_mutex);
        bnad_lock_uninit(bnad);
        bnad_uninit(bnad);
        free_netdev(netdev);
 
 
 #define BNAD_MAX_RXS           1
 #define BNAD_MAX_RXPS_PER_RX   16
+#define BNAD_MAX_RXQ_PER_RXP   2
 
 /*
  * Control structure pointed to by ccb->ctrl, which
 #define BNAD_STATS_TIMER_FREQ          1000    /* in msecs */
 #define BNAD_DIM_TIMER_FREQ            1000    /* in msecs */
 
+#define BNAD_IOCETH_TIMEOUT            10000   /* in msecs */
+
 #define BNAD_MAX_Q_DEPTH               0x10000
 #define BNAD_MIN_Q_DEPTH               0x200
 
 #define BNAD_RXQ_REFILL                        0
 #define BNAD_RXQ_STARTED               1
 
+/* Resource limits */
+#define BNAD_NUM_TXQ                   (bnad->num_tx * bnad->num_txq_per_tx)
+#define BNAD_NUM_RXP                   (bnad->num_rx * bnad->num_rxp_per_rx)
+
 /*
  * DATA STRUCTURES
  */
        struct completion       tx_comp;
        struct completion       rx_comp;
        struct completion       stats_comp;
-       struct completion       port_comp;
+       struct completion       enet_comp;
+       struct completion       mtu_comp;
 
        u8                      ioc_comp_status;
        u8                      ucast_comp_status;
        u8                      rx_comp_status;
        u8                      stats_comp_status;
        u8                      port_comp_status;
+       u8                      mtu_comp_status;
 };
 
 /* Tx Rx Control Stats */
        u64             netif_rx_dropped;
 
        u64             link_toggle;
+       u64             cee_toggle;
        u64             cee_up;
 
        u64             rxp_info_alloc_failed;
 struct bnad_tx_info {
        struct bna_tx *tx; /* 1:1 between tx_info & tx */
        struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
+       u32 tx_id;
 } ____cacheline_aligned;
 
 struct bnad_rx_info {
        struct bna_rx *rx; /* 1:1 between rx_info & rx */
 
        struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
+       u32 rx_id;
 } ____cacheline_aligned;
 
 /* Unmap queues for Tx / Rx cleanup */
 /* Defines for run_flags bit-mask */
 /* Set, tested & cleared using xxx_bit() functions */
 /* Values indicate bit positions */
-#define        BNAD_RF_CEE_RUNNING             1
+#define BNAD_RF_CEE_RUNNING            0
+#define BNAD_RF_MTU_SET                1
 #define BNAD_RF_MBOX_IRQ_DISABLED      2
-#define BNAD_RF_RX_STARTED             3
+#define BNAD_RF_NETDEV_REGISTERED      3
 #define BNAD_RF_DIM_TIMER_RUNNING      4
 #define BNAD_RF_STATS_TIMER_RUNNING    5
-#define BNAD_RF_TX_SHUTDOWN_DELAYED    6
-#define BNAD_RF_RX_SHUTDOWN_DELAYED    7
+#define BNAD_RF_TX_PRIO_SET            6
+
+/* Defines for fast path flags */
+/* Defined as bit positions */
+#define BNAD_FP_IN_RX_PATH           0
 
 struct bnad {
        struct net_device       *netdev;
 
        /* Control path resources, memory & irq */
        struct bna_res_info res_info[BNA_RES_T_MAX];
+       struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
        struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
        struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
 
 extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
 extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
 
-extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
-extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
-extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
+extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id);
 
 /* Timer start/stop protos */
 extern void bnad_dim_timer_start(struct bnad *bnad);
 
 
 #define BNAD_NUM_TXF_COUNTERS 12
 #define BNAD_NUM_RXF_COUNTERS 10
-#define BNAD_NUM_CQ_COUNTERS 3
+#define BNAD_NUM_CQ_COUNTERS (3 + 5)
 #define BNAD_NUM_RXQ_COUNTERS 6
 #define BNAD_NUM_TXQ_COUNTERS 5
 
 #define BNAD_ETHTOOL_STATS_NUM                                         \
        (sizeof(struct rtnl_link_stats64) / sizeof(u64) +       \
        sizeof(struct bnad_drv_stats) / sizeof(u64) +           \
-       offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
+       offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
 
 static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
        "rx_packets",
        ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
        if (ioc_attr) {
                spin_lock_irqsave(&bnad->bna_lock, flags);
-               bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+               bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
                strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
        struct bnad *bnad = netdev_priv(netdev);
 
        pauseparam->autoneg = 0;
-       pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
-       pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
+       pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
+       pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
 }
 
 static int
                return -EINVAL;
 
        mutex_lock(&bnad->conf_mutex);
-       if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
-           pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
+       if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
+           pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
                pause_config.rx_pause = pauseparam->rx_pause;
                pause_config.tx_pause = pauseparam->tx_pause;
                spin_lock_irqsave(&bnad->bna_lock, flags);
-               bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
+               bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
        }
        mutex_unlock(&bnad->conf_mutex);
 {
        struct bnad *bnad = netdev_priv(netdev);
        int i, j, q_num;
-       u64 bmap;
+       u32 bmap;
 
        mutex_lock(&bnad->conf_mutex);
 
                               ETH_GSTRING_LEN);
                        string += ETH_GSTRING_LEN;
                }
-               bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
-                       ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
-               for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+               bmap = bna_tx_rid_mask(&bnad->bna);
+               for (i = 0; bmap; i++) {
                        if (bmap & 1) {
                                sprintf(string, "txf%d_ucast_octets", i);
                                string += ETH_GSTRING_LEN;
                        bmap >>= 1;
                }
 
-               bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
-                       ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
-               for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+               bmap = bna_rx_rid_mask(&bnad->bna);
+               for (i = 0; bmap; i++) {
                        if (bmap & 1) {
                                sprintf(string, "rxf%d_ucast_octets", i);
                                string += ETH_GSTRING_LEN;
 {
        struct bnad *bnad = netdev_priv(netdev);
        int i, j, count, rxf_active_num = 0, txf_active_num = 0;
-       u64 bmap;
+       u32 bmap;
 
-       bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
-                       ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
-       for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+       bmap = bna_tx_rid_mask(&bnad->bna);
+       for (i = 0; bmap; i++) {
                if (bmap & 1)
                        txf_active_num++;
                bmap >>= 1;
        }
-       bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
-                       ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
-       for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+       bmap = bna_rx_rid_mask(&bnad->bna);
+       for (i = 0; bmap; i++) {
                if (bmap & 1)
                        rxf_active_num++;
                bmap >>= 1;
        unsigned long flags;
        struct rtnl_link_stats64 *net_stats64;
        u64 *stats64;
-       u64 bmap;
+       u32 bmap;
 
        mutex_lock(&bnad->conf_mutex);
        if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
                buf[bi++] = stats64[i];
 
        /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
-       stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
+       stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
        for (i = 0;
-            i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
+            i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
+               sizeof(u64);
             i++)
                buf[bi++] = stats64[i];
 
        /* Fill txf stats into ethtool buffers */
-       bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
-                       ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
-       for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+       bmap = bna_tx_rid_mask(&bnad->bna);
+       for (i = 0; bmap; i++) {
                if (bmap & 1) {
                        stats64 = (u64 *)&bnad->stats.bna_stats->
-                                               hw_stats->txf_stats[i];
-                       for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
+                                               hw_stats.txf_stats[i];
+                       for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
                                        sizeof(u64); j++)
                                buf[bi++] = stats64[j];
                }
        }
 
        /*  Fill rxf stats into ethtool buffers */
-       bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
-                       ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
-       for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+       bmap = bna_rx_rid_mask(&bnad->bna);
+       for (i = 0; bmap; i++) {
                if (bmap & 1) {
                        stats64 = (u64 *)&bnad->stats.bna_stats->
-                                               hw_stats->rxf_stats[i];
-                       for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
+                                               hw_stats.rxf_stats[i];
+                       for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
                                        sizeof(u64); j++)
                                buf[bi++] = stats64[j];
                }
 
 
 extern char bfa_version[];
 
-#define        CNA_FW_FILE_CT  "ctfw_cna.bin"
+#define        CNA_FW_FILE_CT  "ctfw.bin"
 #define FC_SYMNAME_MAX 256     /*!< max name server symbolic name size */
 
 #pragma pack(1)
        }                                                               \
 }
 
+/*
+ * bfa_q_deq_tail - dequeue an element from tail of the queue
+ */
+#define bfa_q_deq_tail(_q, _qe) {                                      \
+       if (!list_empty(_q)) {                                          \
+               *((struct list_head **) (_qe)) = bfa_q_prev(_q);        \
+               bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =  \
+                                               (struct list_head *) (_q); \
+               bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
+               bfa_q_qe_init(*((struct list_head **) _qe));            \
+       } else {                                                        \
+               *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+       }                                                               \
+}
+
+/*
+ * bfa_q_enq_head - enqueue an element at the head of the queue
+ */
+#define bfa_q_enq_head(_q, _qe) {                                      \
+       if (!((bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL))) \
+               pr_err("Assertion failure: %s:%d: %d",                  \
+                       __FILE__, __LINE__,                             \
+               (bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
+       bfa_q_next(_qe) = bfa_q_next(_q);                               \
+       bfa_q_prev(_qe) = (struct list_head *) (_q);                    \
+       bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe);        \
+       bfa_q_next(_q) = (struct list_head *) (_qe);                    \
+}
+
 #endif /* __CNA_H__ */