www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
scsi/bfa driver update from 2.3.2.3 to 3.0.2.2
author		Maxim Uvarov <maxim.uvarov@oracle.com>
		Thu, 12 Jan 2012 21:14:37 +0000 (13:14 -0800)
committer	Maxim Uvarov <maxim.uvarov@oracle.com>
		Thu, 12 Jan 2012 23:08:03 +0000 (15:08 -0800)
Orabug: 13254
Signed-off-by: Maxim Uvarov <maxim.uvarov@oracle.com>
40 files changed:
drivers/scsi/bfa/Makefile
drivers/scsi/bfa/bfa.h
drivers/scsi/bfa/bfa_core.c
drivers/scsi/bfa/bfa_defs.h
drivers/scsi/bfa/bfa_defs_fcs.h
drivers/scsi/bfa/bfa_defs_svc.h
drivers/scsi/bfa/bfa_fc.h
drivers/scsi/bfa/bfa_fcbuild.c
drivers/scsi/bfa/bfa_fcbuild.h
drivers/scsi/bfa/bfa_fcpim.c
drivers/scsi/bfa/bfa_fcpim.h
drivers/scsi/bfa/bfa_fcs.c
drivers/scsi/bfa/bfa_fcs.h
drivers/scsi/bfa/bfa_fcs_fcpim.c
drivers/scsi/bfa/bfa_fcs_lport.c
drivers/scsi/bfa/bfa_fcs_rport.c
drivers/scsi/bfa/bfa_hw_cb.c
drivers/scsi/bfa/bfa_hw_ct.c
drivers/scsi/bfa/bfa_ioc.c
drivers/scsi/bfa/bfa_ioc.h
drivers/scsi/bfa/bfa_ioc_cb.c
drivers/scsi/bfa/bfa_ioc_ct.c
drivers/scsi/bfa/bfa_modules.h
drivers/scsi/bfa/bfa_port.c
drivers/scsi/bfa/bfa_port.h
drivers/scsi/bfa/bfa_svc.c
drivers/scsi/bfa/bfa_svc.h
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_attr.c
drivers/scsi/bfa/bfad_bsg.c [new file with mode: 0644]
drivers/scsi/bfa/bfad_bsg.h [new file with mode: 0644]
drivers/scsi/bfa/bfad_debugfs.c
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bfa/bfad_im.h
drivers/scsi/bfa/bfi.h
drivers/scsi/bfa/bfi_cbreg.h [deleted file]
drivers/scsi/bfa/bfi_ctreg.h [deleted file]
drivers/scsi/bfa/bfi_ms.h
drivers/scsi/bfa/bfi_reg.h [new file with mode: 0644]

diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index 4ce6f4942327f860461fafc6055a1a3041fc37e4..478d126ed2da2d78e2f53c9d0247ff854316815a 100644
@@ -1,6 +1,6 @@
-obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
+obj-m := bfa.o
 
-bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
+bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o bfad_bsg.o
 bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
 bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
 bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 7be6b5a8114b4d061b945f98cfc2a1f9d075b7da..a796de9350541a4fcae375681bb911d1fc27ec00 100644
@@ -27,7 +27,7 @@
 struct bfa_s;
 
 typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
-typedef void    (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
 
 /*
  * Interrupt message handlers
@@ -54,7 +54,8 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
         ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
                   + bfa_reqq_pi((__bfa), (__reqq)))))
 
-#define bfa_reqq_produce(__bfa, __reqq)        do {                            \
+#define bfa_reqq_produce(__bfa, __reqq, __mh)  do {                    \
+               (__mh).mtag.h2i.qid     = (__bfa)->iocfc.hw_qid[__reqq];\
                (__bfa)->iocfc.req_cq_pi[__reqq]++;                     \
                (__bfa)->iocfc.req_cq_pi[__reqq] &=                     \
                        ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
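
Illustration only, not part of this patch: the new __mh argument lets bfa_reqq_produce() stamp the hardware queue id (learnt from firmware, see hw_qid further down) into the message header before the producer index is advanced. A minimal caller sketch, modeled on the bfa_iocfc_israttr_set() hunk later in this diff; the message type name is an assumption taken from that path.

	struct bfi_iocfc_set_intr_req_s *m;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);	/* next free request element */
	if (!m)
		return BFA_STATUS_DEVBUSY;	/* request queue is full */

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	/* ... fill in the message body ... */
	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);	/* stamps qid, bumps PI */
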
@@ -75,16 +76,6 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
        (__index) &= ((__size) - 1);                    \
 } while (0)
 
-/*
- * Queue element to wait for room in request queue. FIFO order is
- * maintained when fullfilling requests.
- */
-struct bfa_reqq_wait_s {
-       struct list_head        qe;
-       void            (*qresume) (void *cbarg);
-       void            *cbarg;
-};
-
 /*
  * Circular queue usage assignments
  */
@@ -128,21 +119,10 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
 
 #define bfa_reqq_wcancel(__wqe)        list_del(&(__wqe)->qe)
 
-
-/*
- * Generic BFA callback element.
- */
-struct bfa_cb_qe_s {
-       struct list_head         qe;
-       bfa_cb_cbfn_t  cbfn;
-       bfa_boolean_t   once;
-       u32             rsvd;
-       void           *cbarg;
-};
-
 #define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do {    \
                (__hcb_qe)->cbfn  = (__cbfn);      \
                (__hcb_qe)->cbarg = (__cbarg);      \
+               (__hcb_qe)->pre_rmv = BFA_FALSE;                \
                list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
        } while (0)
 
@@ -157,6 +137,11 @@ struct bfa_cb_qe_s {
                }                                                       \
        } while (0)
 
+#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do {            \
+               (__hcb_qe)->fw_status = (__status);                     \
+               list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);       \
+} while (0)
+
 #define bfa_cb_queue_done(__hcb_qe) do {       \
                (__hcb_qe)->once = BFA_FALSE;   \
        } while (0)
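
Illustration only, not part of this patch: bfa_cb_queue_status() complements the pre_rmv-based pending-queue helpers added at the end of this header (see the sketch after the #endif below); a firmware reply uses it to hand its status to a previously armed completion element. Hypothetical reply-handler fragment, where fcomp is an assumed struct bfa_cb_pending_q_s pointer:

	/* complete the armed element with the firmware-reported status */
	bfa_cb_queue_status(bfa, &fcomp->hcb_qe, rsp->status);
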
@@ -172,44 +157,14 @@ struct bfa_pciid_s {
 
 extern char     bfa_version[];
 
-/*
- * BFA memory resources
- */
-enum bfa_mem_type {
-       BFA_MEM_TYPE_KVA = 1,   /*  Kernel Virtual Memory *(non-dma-able) */
-       BFA_MEM_TYPE_DMA = 2,   /*  DMA-able memory */
-       BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
-};
-
-struct bfa_mem_elem_s {
-       enum bfa_mem_type mem_type;     /* see enum bfa_mem_type */
-       u32     mem_len;        /*  Total Length in Bytes       */
-       u8              *kva;           /*  kernel virtual address      */
-       u64     dma;            /*  dma address if DMA memory   */
-       u8              *kva_curp;      /*  kva allocation cursor       */
-       u64     dma_curp;       /*  dma allocation cursor       */
-};
-
-struct bfa_meminfo_s {
-       struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
-};
-#define bfa_meminfo_kva(_m)                            \
-       ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
-#define bfa_meminfo_dma_virt(_m)                       \
-       ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
-#define bfa_meminfo_dma_phys(_m)                       \
-       ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
-
 struct bfa_iocfc_regs_s {
        void __iomem    *intr_status;
        void __iomem    *intr_mask;
        void __iomem    *cpe_q_pi[BFI_IOC_MAX_CQS];
        void __iomem    *cpe_q_ci[BFI_IOC_MAX_CQS];
-       void __iomem    *cpe_q_depth[BFI_IOC_MAX_CQS];
        void __iomem    *cpe_q_ctrl[BFI_IOC_MAX_CQS];
        void __iomem    *rme_q_ci[BFI_IOC_MAX_CQS];
        void __iomem    *rme_q_pi[BFI_IOC_MAX_CQS];
-       void __iomem    *rme_q_depth[BFI_IOC_MAX_CQS];
        void __iomem    *rme_q_ctrl[BFI_IOC_MAX_CQS];
 };
 
@@ -229,27 +184,57 @@ struct bfa_msix_s {
 struct bfa_hwif_s {
        void (*hw_reginit)(struct bfa_s *bfa);
        void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
-       void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
+       void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
        void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
-       void (*hw_msix_install)(struct bfa_s *bfa);
+       void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
+       void (*hw_msix_queue_install)(struct bfa_s *bfa);
        void (*hw_msix_uninstall)(struct bfa_s *bfa);
        void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
        void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
                                u32 *nvecs, u32 *maxvec);
        void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
                                       u32 *end);
+       int     cpe_vec_q0;
+       int     rme_vec_q0;
 };
 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
 
+struct bfa_faa_cbfn_s {
+       bfa_cb_iocfc_t  faa_cbfn;
+       void            *faa_cbarg;
+};
+
+#define BFA_FAA_ENABLED                1
+#define BFA_FAA_DISABLED       2
+
+/*
+ *     FAA attributes
+ */
+struct bfa_faa_attr_s {
+       wwn_t   faa;
+       u8      faa_state;
+       u8      pwwn_source;
+       u8      rsvd[6];
+};
+
+struct bfa_faa_args_s {
+       struct bfa_faa_attr_s   *faa_attr;
+       struct bfa_faa_cbfn_s   faa_cb;
+       u8                      faa_state;
+       bfa_boolean_t           busy;
+};
+
 struct bfa_iocfc_s {
        struct bfa_s            *bfa;
        struct bfa_iocfc_cfg_s  cfg;
        int                     action;
        u32             req_cq_pi[BFI_IOC_MAX_CQS];
        u32             rsp_cq_ci[BFI_IOC_MAX_CQS];
+       u8              hw_qid[BFI_IOC_MAX_CQS];
        struct bfa_cb_qe_s      init_hcb_qe;
        struct bfa_cb_qe_s      stop_hcb_qe;
        struct bfa_cb_qe_s      dis_hcb_qe;
+       struct bfa_cb_qe_s      en_hcb_qe;
        struct bfa_cb_qe_s      stats_hcb_qe;
        bfa_boolean_t           cfgdone;
 
@@ -257,7 +242,6 @@ struct bfa_iocfc_s {
        struct bfi_iocfc_cfg_s *cfginfo;
        struct bfa_dma_s        cfgrsp_dma;
        struct bfi_iocfc_cfgrsp_s *cfgrsp;
-       struct bfi_iocfc_cfg_reply_s *cfg_reply;
        struct bfa_dma_s        req_cq_ba[BFI_IOC_MAX_CQS];
        struct bfa_dma_s        req_cq_shadow_ci[BFI_IOC_MAX_CQS];
        struct bfa_dma_s        rsp_cq_ba[BFI_IOC_MAX_CQS];
@@ -267,18 +251,40 @@ struct bfa_iocfc_s {
        bfa_cb_iocfc_t          updateq_cbfn; /*  bios callback function */
        void                    *updateq_cbarg; /*  bios callback arg */
        u32     intr_mask;
+       struct bfa_faa_args_s   faa_args;
+       struct bfa_mem_dma_s    ioc_dma;
+       struct bfa_mem_dma_s    iocfc_dma;
+       struct bfa_mem_dma_s    reqq_dma[BFI_IOC_MAX_CQS];
+       struct bfa_mem_dma_s    rspq_dma[BFI_IOC_MAX_CQS];
+       struct bfa_mem_kva_s    kva_seg;
 };
 
-#define bfa_lpuid(__bfa)                                               \
-       bfa_ioc_portid(&(__bfa)->ioc)
+#define BFA_MEM_IOC_DMA(_bfa)          (&((_bfa)->iocfc.ioc_dma))
+#define BFA_MEM_IOCFC_DMA(_bfa)                (&((_bfa)->iocfc.iocfc_dma))
+#define BFA_MEM_REQQ_DMA(_bfa, _qno)   (&((_bfa)->iocfc.reqq_dma[(_qno)]))
+#define BFA_MEM_RSPQ_DMA(_bfa, _qno)   (&((_bfa)->iocfc.rspq_dma[(_qno)]))
+#define BFA_MEM_IOCFC_KVA(_bfa)                (&((_bfa)->iocfc.kva_seg))
+
+#define bfa_fn_lpu(__bfa)      \
+       bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc))
 #define bfa_msix_init(__bfa, __nvecs)                                  \
        ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
-#define bfa_msix_install(__bfa)                                                \
-       ((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
+#define bfa_msix_ctrl_install(__bfa)                                   \
+       ((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa))
+#define bfa_msix_queue_install(__bfa)                                  \
+       ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
 #define bfa_msix_uninstall(__bfa)                                      \
        ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
-#define bfa_isr_mode_set(__bfa, __msix)                                        \
-       ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
+#define bfa_isr_rspq_ack(__bfa, __queue, __ci)                         \
+       ((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
+#define bfa_isr_reqq_ack(__bfa, __queue) do {                          \
+       if ((__bfa)->iocfc.hwif.hw_reqq_ack)                            \
+               (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue);        \
+} while (0)
+#define bfa_isr_mode_set(__bfa, __msix) do {                           \
+       if ((__bfa)->iocfc.hwif.hw_isr_mode_set)                        \
+               (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix);     \
+} while (0)
 #define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)           \
        ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap,           \
                                        __nvecs, __maxvec))
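
Illustration only, not part of this patch: bfa_isr_reqq_ack() and bfa_isr_mode_set() are now NULL-guarded because the patch leaves hw_reqq_ack unset on the CB path and hw_isr_mode_set unset on CT2 (see the bfa_iocfc_init_mem() hunk in bfa_core.c below), so callers may invoke them unconditionally:

	bfa_isr_reqq_ack(bfa, qid);			/* no-op where hw_reqq_ack == NULL */
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);	/* no-op where hw_isr_mode_set == NULL */
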
@@ -290,17 +296,17 @@ struct bfa_iocfc_s {
 /*
  * FC specific IOC functions.
  */
-void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-                      u32 *dm_len);
+void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg,
+                       struct bfa_meminfo_s *meminfo,
+                       struct bfa_s *bfa);
 void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
                      struct bfa_iocfc_cfg_s *cfg,
-                     struct bfa_meminfo_s *meminfo,
                      struct bfa_pcidev_s *pcidev);
 void bfa_iocfc_init(struct bfa_s *bfa);
 void bfa_iocfc_start(struct bfa_s *bfa);
 void bfa_iocfc_stop(struct bfa_s *bfa);
 void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
-void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
+void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa);
 bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
 void bfa_iocfc_reset_queues(struct bfa_s *bfa);
 
@@ -310,10 +316,10 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
 void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
 
 void bfa_hwcb_reginit(struct bfa_s *bfa);
-void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
 void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwcb_msix_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
 void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
 void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
 void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -321,10 +327,13 @@ void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
 void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
                                 u32 *end);
 void bfa_hwct_reginit(struct bfa_s *bfa);
+void bfa_hwct2_reginit(struct bfa_s *bfa);
 void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
 void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwct_msix_install(struct bfa_s *bfa);
+void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
 void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
 void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
 void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -373,11 +382,28 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
 #define bfa_get_fw_clock_res(__bfa)            \
        ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
 
+/*
+ * lun mask macros return NULL when min cfg is enabled and there is
+ * no memory allocated for lunmask.
+ */
+#define bfa_get_lun_mask(__bfa)                                        \
+       ((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL :       \
+        (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
+
+#define bfa_get_lun_mask_list(_bfa)                            \
+       ((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL :        \
+        (bfa_get_lun_mask(_bfa)->lun_list)
+
+#define bfa_get_lun_mask_status(_bfa)                          \
+       (((&(_bfa)->modules.dconf_mod)->min_cfg)                \
+        ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
+
 void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
 void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
 void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
 void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
-                        struct bfa_meminfo_s *meminfo);
+                       struct bfa_meminfo_s *meminfo,
+                       struct bfa_s *bfa);
 void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_meminfo_s *meminfo,
                struct bfa_pcidev_s *pcidev);
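
Illustration only, not part of this patch: as the comment above these macros states, the lun-mask accessors return NULL when minimum configuration is enabled, so a hypothetical caller would bail out before dereferencing them:

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;	/* min cfg: no lun-mask memory */
	/* beyond this point bfa_get_lun_mask()/bfa_get_lun_mask_list() are usable */
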
@@ -402,7 +428,22 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
 
 void bfa_iocfc_enable(struct bfa_s *bfa);
 void bfa_iocfc_disable(struct bfa_s *bfa);
+void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
 #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)                \
        bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
 
+struct bfa_cb_pending_q_s {
+       struct bfa_cb_qe_s      hcb_qe;
+       void                    *data;  /* Driver buffer */
+};
+
+/* Common macros to operate on pending stats/attr apis */
+#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \
+       bfa_q_qe_init(&((__qe)->hcb_qe.qe));                    \
+       (__qe)->hcb_qe.cbfn = (__cbfn);                         \
+       (__qe)->hcb_qe.cbarg = (__cbarg);                       \
+       (__qe)->hcb_qe.pre_rmv = BFA_TRUE;                      \
+       (__qe)->data = (__data);                                \
+} while (0)
+
 #endif /* __BFA_H__ */
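
Illustration only, not part of this patch: bfa_pending_q_init() arms a completion element with a driver buffer and a status-style callback before a request is sent; the reply path then queues it with bfa_cb_queue_status() (sketched earlier in this header's diff) and the callback runs when the completion queue is drained. Hypothetical end-to-end sketch; the cast of the status-style callback to bfa_cb_cbfn_t for the generic cbfn slot, and all hypothetical_* names, are assumptions:

	static void
	hypothetical_stats_done(void *cbarg, bfa_status_t status)
	{
		/* buffer passed as __data is filled when status == BFA_STATUS_OK */
	}

	/* request path */
	struct bfa_cb_pending_q_s cb_qe;

	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)hypothetical_stats_done,
			   hypothetical_cbarg, hypothetical_stats_buf);
	/* ... issue the stats/attr request to firmware, remembering &cb_qe ... */

	/* firmware-reply path */
	bfa_cb_queue_status(bfa, &cb_qe.hcb_qe, rsp->status);
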
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 91838c51fb76458fc573958581f7453ee1870952..4bd546bcc240740fdadd04fd435828b4dc94f4f6 100644
@@ -17,7 +17,7 @@
 
 #include "bfad_drv.h"
 #include "bfa_modules.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
 
 BFA_TRC_FILE(HAL, CORE);
 
@@ -25,13 +25,15 @@ BFA_TRC_FILE(HAL, CORE);
  * BFA module list terminated by NULL
  */
 static struct bfa_module_s *hal_mods[] = {
+       &hal_mod_fcdiag,
        &hal_mod_sgpg,
        &hal_mod_fcport,
        &hal_mod_fcxp,
        &hal_mod_lps,
        &hal_mod_uf,
        &hal_mod_rport,
-       &hal_mod_fcpim,
+       &hal_mod_fcp,
+       &hal_mod_dconf,
        NULL
 };
 
@@ -41,7 +43,7 @@ static struct bfa_module_s *hal_mods[] = {
 static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
        bfa_isr_unhandled,      /* NONE */
        bfa_isr_unhandled,      /* BFI_MC_IOC */
-       bfa_isr_unhandled,      /* BFI_MC_DIAG */
+       bfa_fcdiag_intr,        /* BFI_MC_DIAG */
        bfa_isr_unhandled,      /* BFI_MC_FLASH */
        bfa_isr_unhandled,      /* BFI_MC_CEE */
        bfa_fcport_isr,         /* BFI_MC_FCPORT */
@@ -51,7 +53,7 @@ static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
        bfa_fcxp_isr,           /* BFI_MC_FCXP */
        bfa_lps_isr,            /* BFI_MC_LPS */
        bfa_rport_isr,          /* BFI_MC_RPORT */
-       bfa_itnim_isr,          /* BFI_MC_ITNIM */
+       bfa_itn_isr,            /* BFI_MC_ITN */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_READ */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_WRITE */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_IO */
@@ -89,23 +91,78 @@ static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
 
 
 static void
-bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
+bfa_com_port_attach(struct bfa_s *bfa)
 {
        struct bfa_port_s       *port = &bfa->modules.port;
-       u32                     dm_len;
-       u8                      *dm_kva;
-       u64                     dm_pa;
+       struct bfa_mem_dma_s    *port_dma = BFA_MEM_PORT_DMA(bfa);
 
-       dm_len = bfa_port_meminfo();
-       dm_kva = bfa_meminfo_dma_virt(mi);
-       dm_pa  = bfa_meminfo_dma_phys(mi);
-
-       memset(port, 0, sizeof(struct bfa_port_s));
        bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
-       bfa_port_mem_claim(port, dm_kva, dm_pa);
+       bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
+}
+
+/*
+ * ablk module attach
+ */
+static void
+bfa_com_ablk_attach(struct bfa_s *bfa)
+{
+       struct bfa_ablk_s       *ablk = &bfa->modules.ablk;
+       struct bfa_mem_dma_s    *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+
+       bfa_ablk_attach(ablk, &bfa->ioc);
+       bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
+}
+
+static void
+bfa_com_cee_attach(struct bfa_s *bfa)
+{
+       struct bfa_cee_s        *cee = &bfa->modules.cee;
+       struct bfa_mem_dma_s    *cee_dma = BFA_MEM_CEE_DMA(bfa);
+
+       cee->trcmod = bfa->trcmod;
+       bfa_cee_attach(cee, &bfa->ioc, bfa);
+       bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
+}
+
+static void
+bfa_com_sfp_attach(struct bfa_s *bfa)
+{
+       struct bfa_sfp_s        *sfp = BFA_SFP_MOD(bfa);
+       struct bfa_mem_dma_s    *sfp_dma = BFA_MEM_SFP_DMA(bfa);
+
+       bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
+       bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
+}
+
+static void
+bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+       struct bfa_flash_s      *flash = BFA_FLASH(bfa);
+       struct bfa_mem_dma_s    *flash_dma = BFA_MEM_FLASH_DMA(bfa);
+
+       bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+       bfa_flash_memclaim(flash, flash_dma->kva_curp,
+                          flash_dma->dma_curp, mincfg);
+}
+
+static void
+bfa_com_diag_attach(struct bfa_s *bfa)
+{
+       struct bfa_diag_s       *diag = BFA_DIAG_MOD(bfa);
+       struct bfa_mem_dma_s    *diag_dma = BFA_MEM_DIAG_DMA(bfa);
+
+       bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
+       bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
+}
+
+static void
+bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+       struct bfa_phy_s        *phy = BFA_PHY(bfa);
+       struct bfa_mem_dma_s    *phy_dma = BFA_MEM_PHY_DMA(bfa);
 
-       bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
-       bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
+       bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+       bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
 }
 
 /*
@@ -122,6 +179,7 @@ enum {
        BFA_IOCFC_ACT_INIT      = 1,
        BFA_IOCFC_ACT_STOP      = 2,
        BFA_IOCFC_ACT_DISABLE   = 3,
+       BFA_IOCFC_ACT_ENABLE    = 4,
 };
 
 #define DEF_CFG_NUM_FABRICS            1
@@ -173,10 +231,88 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
        }
 }
 
+static inline void
+bfa_isr_rspq(struct bfa_s *bfa, int qid)
+{
+       struct bfi_msg_s *m;
+       u32     pi, ci;
+       struct list_head *waitq;
+
+       ci = bfa_rspq_ci(bfa, qid);
+       pi = bfa_rspq_pi(bfa, qid);
+
+       while (ci != pi) {
+               m = bfa_rspq_elem(bfa, qid, ci);
+               WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
+
+               bfa_isrs[m->mhdr.msg_class] (bfa, m);
+               CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
+       }
+
+       /*
+        * acknowledge RME completions and update CI
+        */
+       bfa_isr_rspq_ack(bfa, qid, ci);
+
+       /*
+        * Resume any pending requests in the corresponding reqq.
+        */
+       waitq = bfa_reqq(bfa, qid);
+       if (!list_empty(waitq))
+               bfa_reqq_resume(bfa, qid);
+}
+
+static inline void
+bfa_isr_reqq(struct bfa_s *bfa, int qid)
+{
+       struct list_head *waitq;
+
+       bfa_isr_reqq_ack(bfa, qid);
+
+       /*
+        * Resume any pending requests in the corresponding reqq.
+        */
+       waitq = bfa_reqq(bfa, qid);
+       if (!list_empty(waitq))
+               bfa_reqq_resume(bfa, qid);
+}
+
 void
 bfa_msix_all(struct bfa_s *bfa, int vec)
 {
-       bfa_intx(bfa);
+       u32     intr, qintr;
+       int     queue;
+
+       intr = readl(bfa->iocfc.bfa_regs.intr_status);
+       if (!intr)
+               return;
+
+       /*
+        * RME completion queue interrupt
+        */
+       qintr = intr & __HFN_INT_RME_MASK;
+       if (qintr && bfa->queue_process) {
+               for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+                       bfa_isr_rspq(bfa, queue);
+       }
+
+       intr &= ~qintr;
+       if (!intr)
+               return;
+
+       /*
+        * CPE completion queue interrupt
+        */
+       qintr = intr & __HFN_INT_CPE_MASK;
+       if (qintr && bfa->queue_process) {
+               for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+                       bfa_isr_reqq(bfa, queue);
+       }
+       intr &= ~qintr;
+       if (!intr)
+               return;
+
+       bfa_msix_lpu_err(bfa, intr);
 }
 
 bfa_boolean_t
@@ -186,20 +322,19 @@ bfa_intx(struct bfa_s *bfa)
        int queue;
 
        intr = readl(bfa->iocfc.bfa_regs.intr_status);
-       if (!intr)
-               return BFA_FALSE;
+
+       qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
+       if (qintr)
+               writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
        /*
-        * RME completion queue interrupt
+        * Unconditional RME completion queue interrupt
         */
-       qintr = intr & __HFN_INT_RME_MASK;
-       writel(qintr, bfa->iocfc.bfa_regs.intr_status);
-
-       for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-               if (intr & (__HFN_INT_RME_Q0 << queue))
-                       bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
+       if (bfa->queue_process) {
+               for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+                       bfa_isr_rspq(bfa, queue);
        }
-       intr &= ~qintr;
+
        if (!intr)
                return BFA_TRUE;
 
@@ -207,11 +342,9 @@ bfa_intx(struct bfa_s *bfa)
         * CPE completion queue interrupt
         */
        qintr = intr & __HFN_INT_CPE_MASK;
-       writel(qintr, bfa->iocfc.bfa_regs.intr_status);
-
-       for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-               if (intr & (__HFN_INT_CPE_Q0 << queue))
-                       bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
+       if (qintr && bfa->queue_process) {
+               for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+                       bfa_isr_reqq(bfa, queue);
        }
        intr &= ~qintr;
        if (!intr)
@@ -225,32 +358,25 @@ bfa_intx(struct bfa_s *bfa)
 void
 bfa_isr_enable(struct bfa_s *bfa)
 {
-       u32 intr_unmask;
+       u32 umsk;
        int pci_func = bfa_ioc_pcifn(&bfa->ioc);
 
        bfa_trc(bfa, pci_func);
 
-       bfa_msix_install(bfa);
-       intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
-                      __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
-                      __HFN_INT_LL_HALT);
-
-       if (pci_func == 0)
-               intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
-                               __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
-                               __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
-                               __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
-                               __HFN_INT_MBOX_LPU0);
-       else
-               intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
-                               __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
-                               __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
-                               __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
-                               __HFN_INT_MBOX_LPU1);
-
-       writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
-       writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
-       bfa->iocfc.intr_mask = ~intr_unmask;
+       bfa_msix_ctrl_install(bfa);
+
+       if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+               umsk = __HFN_INT_ERR_MASK_CT2;
+               umsk |= pci_func == 0 ?
+                       __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
+       } else {
+               umsk = __HFN_INT_ERR_MASK;
+               umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
+       }
+
+       writel(umsk, bfa->iocfc.bfa_regs.intr_status);
+       writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
+       bfa->iocfc.intr_mask = ~umsk;
        bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
 }
 
@@ -263,20 +389,9 @@ bfa_isr_disable(struct bfa_s *bfa)
 }
 
 void
-bfa_msix_reqq(struct bfa_s *bfa, int qid)
+bfa_msix_reqq(struct bfa_s *bfa, int vec)
 {
-       struct list_head *waitq;
-
-       qid &= (BFI_IOC_MAX_CQS - 1);
-
-       bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
-
-       /*
-        * Resume any pending requests in the corresponding reqq.
-        */
-       waitq = bfa_reqq(bfa, qid);
-       if (!list_empty(waitq))
-               bfa_reqq_resume(bfa, qid);
+       bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
 }
 
 void
@@ -290,57 +405,38 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
 }
 
 void
-bfa_msix_rspq(struct bfa_s *bfa, int qid)
+bfa_msix_rspq(struct bfa_s *bfa, int vec)
 {
-       struct bfi_msg_s *m;
-       u32 pi, ci;
-       struct list_head *waitq;
-
-       qid &= (BFI_IOC_MAX_CQS - 1);
-
-       bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
-
-       ci = bfa_rspq_ci(bfa, qid);
-       pi = bfa_rspq_pi(bfa, qid);
-
-       if (bfa->rme_process) {
-               while (ci != pi) {
-                       m = bfa_rspq_elem(bfa, qid, ci);
-                       bfa_isrs[m->mhdr.msg_class] (bfa, m);
-                       CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
-               }
-       }
-
-       /*
-        * update CI
-        */
-       bfa_rspq_ci(bfa, qid) = pi;
-       writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
-       mmiowb();
-
-       /*
-        * Resume any pending requests in the corresponding reqq.
-        */
-       waitq = bfa_reqq(bfa, qid);
-       if (!list_empty(waitq))
-               bfa_reqq_resume(bfa, qid);
+       bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
 }
 
 void
 bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 {
        u32 intr, curr_value;
+       bfa_boolean_t lpu_isr, halt_isr, pss_isr;
 
        intr = readl(bfa->iocfc.bfa_regs.intr_status);
 
-       if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
-               bfa_ioc_mbox_isr(&bfa->ioc);
+       if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+               halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
+               pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
+               lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
+                                  __HFN_INT_MBOX_LPU1_CT2);
+               intr    &= __HFN_INT_ERR_MASK_CT2;
+       } else {
+               halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
+                                         (intr & __HFN_INT_LL_HALT) : 0;
+               pss_isr  = intr & __HFN_INT_ERR_PSS;
+               lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
+               intr    &= __HFN_INT_ERR_MASK;
+       }
 
-       intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
-               __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
+       if (lpu_isr)
+               bfa_ioc_mbox_isr(&bfa->ioc);
 
        if (intr) {
-               if (intr & __HFN_INT_LL_HALT) {
+               if (halt_isr) {
                        /*
                         * If LL_HALT bit is set then FW Init Halt LL Port
                         * Register needs to be cleared as well so Interrupt
@@ -351,7 +447,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
                        writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
                }
 
-               if (intr & __HFN_INT_ERR_PSS) {
+               if (pss_isr) {
                        /*
                         * ERR_PSS bit needs to be cleared as well in case
                         * interrups are shared so driver's interrupt handler is
@@ -359,7 +455,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
                         */
                        curr_value = readl(
                                        bfa->ioc.ioc_regs.pss_err_status_reg);
-                       curr_value &= __PSS_ERR_STATUS_SET;
                        writel(curr_value,
                                bfa->ioc.ioc_regs.pss_err_status_reg);
                }
@@ -377,41 +472,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
  *  BFA IOC private functions
  */
 
-static void
-bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
-{
-       int             i, per_reqq_sz, per_rspq_sz;
-
-       per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
-                                 BFA_DMA_ALIGN_SZ);
-       per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
-                                 BFA_DMA_ALIGN_SZ);
-
-       /*
-        * Calculate CQ size
-        */
-       for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
-               *dm_len = *dm_len + per_reqq_sz;
-               *dm_len = *dm_len + per_rspq_sz;
-       }
-
-       /*
-        * Calculate Shadow CI/PI size
-        */
-       for (i = 0; i < cfg->fwcfg.num_cqs; i++)
-               *dm_len += (2 * BFA_CACHELINE_SZ);
-}
-
-static void
-bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
-{
-       *dm_len +=
-               BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
-       *dm_len +=
-               BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
-                           BFA_CACHELINE_SZ);
-}
-
 /*
  * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
  */
@@ -433,8 +493,13 @@ bfa_iocfc_send_cfg(void *bfa_arg)
        /*
         * initialize IOC configuration info
         */
+       cfg_info->single_msix_vec = 0;
+       if (bfa->msix.nvecs == 1)
+               cfg_info->single_msix_vec = 1;
        cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
        cfg_info->num_cqs = cfg->fwcfg.num_cqs;
+       cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
+       cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
 
        bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
        /*
@@ -469,7 +534,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
         * dma map IOC configuration itself
         */
        bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
-                   bfa_lpuid(bfa));
+                   bfa_fn_lpu(bfa));
        bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
 
        bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
@@ -491,26 +556,40 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        /*
         * Initialize chip specific handlers.
         */
-       if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
+       if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
                iocfc->hwif.hw_reginit = bfa_hwct_reginit;
                iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
                iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
-               iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
+               iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
+               iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
+               iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
+               iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
        } else {
                iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
-               iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
+               iocfc->hwif.hw_reqq_ack = NULL;
                iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
-               iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
+               iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
+               iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
+               iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
+                       bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+               iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
+                       bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+       }
+
+       if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
+               iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
+               iocfc->hwif.hw_isr_mode_set = NULL;
+               iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
        }
 
        iocfc->hwif.hw_reginit(bfa);
@@ -518,48 +597,42 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 }
 
 static void
-bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
-                   struct bfa_meminfo_s *meminfo)
+bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
 {
-       u8             *dm_kva;
-       u64     dm_pa;
-       int             i, per_reqq_sz, per_rspq_sz;
+       u8      *dm_kva = NULL;
+       u64     dm_pa = 0;
+       int     i, per_reqq_sz, per_rspq_sz, dbgsz;
        struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
-       int             dbgsz;
+       struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+       struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+       struct bfa_mem_dma_s *reqq_dma, *rspq_dma;
 
-       dm_kva = bfa_meminfo_dma_virt(meminfo);
-       dm_pa = bfa_meminfo_dma_phys(meminfo);
+       /* First allocate dma memory for IOC */
+       bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
+                       bfa_mem_dma_phys(ioc_dma));
 
-       /*
-        * First allocate dma memory for IOC.
-        */
-       bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
-       dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
-       dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
-
-       /*
-        * Claim DMA-able memory for the request/response queues and for shadow
-        * ci/pi registers
-        */
+       /* Claim DMA-able memory for the request/response queues */
        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
-                                 BFA_DMA_ALIGN_SZ);
+                               BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
-                                 BFA_DMA_ALIGN_SZ);
+                               BFA_DMA_ALIGN_SZ);
 
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
-               iocfc->req_cq_ba[i].kva = dm_kva;
-               iocfc->req_cq_ba[i].pa = dm_pa;
-               memset(dm_kva, 0, per_reqq_sz);
-               dm_kva += per_reqq_sz;
-               dm_pa += per_reqq_sz;
-
-               iocfc->rsp_cq_ba[i].kva = dm_kva;
-               iocfc->rsp_cq_ba[i].pa = dm_pa;
-               memset(dm_kva, 0, per_rspq_sz);
-               dm_kva += per_rspq_sz;
-               dm_pa += per_rspq_sz;
+               reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
+               iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
+               iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
+               memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);
+
+               rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
+               iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
+               iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
+               memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
        }
 
+       /* Claim IOCFC dma memory - for shadow CI/PI */
+       dm_kva = bfa_mem_dma_virt(iocfc_dma);
+       dm_pa  = bfa_mem_dma_phys(iocfc_dma);
+
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_shadow_ci[i].kva = dm_kva;
                iocfc->req_cq_shadow_ci[i].pa = dm_pa;
@@ -572,36 +645,27 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
                dm_pa += BFA_CACHELINE_SZ;
        }
 
-       /*
-        * Claim DMA-able memory for the config info page
-        */
+       /* Claim IOCFC dma memory - for the config info page */
        bfa->iocfc.cfg_info.kva = dm_kva;
        bfa->iocfc.cfg_info.pa = dm_pa;
        bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
 
-       /*
-        * Claim DMA-able memory for the config response
-        */
+       /* Claim IOCFC dma memory - for the config response */
        bfa->iocfc.cfgrsp_dma.kva = dm_kva;
        bfa->iocfc.cfgrsp_dma.pa = dm_pa;
        bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
-
-       dm_kva +=
-               BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
-                           BFA_CACHELINE_SZ);
+       dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+                       BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
-                            BFA_CACHELINE_SZ);
-
-
-       bfa_meminfo_dma_virt(meminfo) = dm_kva;
-       bfa_meminfo_dma_phys(meminfo) = dm_pa;
+                       BFA_CACHELINE_SZ);
 
+       /* Claim IOCFC kva memory */
        dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
        if (dbgsz > 0) {
-               bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
-               bfa_meminfo_kva(meminfo) += dbgsz;
+               bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
+               bfa_mem_kva_curp(iocfc) += dbgsz;
        }
 }
 
@@ -613,7 +677,9 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
 {
        int             i;
 
-       bfa->rme_process = BFA_TRUE;
+       bfa->queue_process = BFA_TRUE;
+       for (i = 0; i < BFI_IOC_MAX_CQS; i++)
+               bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
 
        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->start(bfa);
@@ -637,7 +703,7 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
        struct bfa_s    *bfa = bfa_arg;
 
        if (complete) {
-               if (bfa->iocfc.cfgdone)
+               if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
                        bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
                else
                        bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
@@ -659,6 +725,16 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
                bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
 }
 
+static void
+bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+       struct bfa_s    *bfa = bfa_arg;
+       struct bfad_s *bfad = bfa->bfad;
+
+       if (compl)
+               complete(&bfad->enable_comp);
+}
+
 static void
 bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
 {
@@ -669,6 +745,37 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
                complete(&bfad->disable_comp);
 }
 
+/**
+ * configure queue registers from firmware response
+ */
+static void
+bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
+{
+       int     i;
+       struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
+       void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+
+       for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
+               bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
+               r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
+               r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
+               r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
+               r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
+               r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
+               r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
+       }
+}
+
+static void
+bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
+{
+       bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
+       bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
+       bfa_rport_res_recfg(bfa, fwcfg->num_rports);
+       bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
+       bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
+}
+
 /*
  * Update BFA configuration from firmware configuration.
  */
@@ -681,6 +788,7 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
 
        fwcfg->num_cqs        = fwcfg->num_cqs;
        fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
+       fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
        fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
        fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
        fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
@@ -688,15 +796,36 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
 
        iocfc->cfgdone = BFA_TRUE;
 
+       /*
+        * configure queue register offsets as learnt from firmware
+        */
+       bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
+
+       /*
+        * Re-configure resources as learnt from Firmware
+        */
+       bfa_iocfc_res_recfg(bfa, fwcfg);
+
+       /*
+        * Install MSIX queue handlers
+        */
+       bfa_msix_queue_install(bfa);
+
        /*
         * Configuration is complete - initialize/start submodules
         */
        bfa_fcport_init(bfa);
 
-       if (iocfc->action == BFA_IOCFC_ACT_INIT)
-               bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
-       else
+       if (iocfc->action == BFA_IOCFC_ACT_INIT) {
+               if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
+                       bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
+                               bfa_iocfc_init_cb, bfa);
+       } else {
+               if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
+                       bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
+                                       bfa_iocfc_enable_cb, bfa);
                bfa_iocfc_start_submod(bfa);
+       }
 }
 void
 bfa_iocfc_reset_queues(struct bfa_s *bfa)
@@ -711,6 +840,181 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
        }
 }
 
+/* Fabric Assigned Address specific functions */
+
+/*
+ *     Check whether IOC is ready before sending command down
+ */
+static bfa_status_t
+bfa_faa_validate_request(struct bfa_s *bfa)
+{
+       enum bfa_ioc_type_e     ioc_type = bfa_get_type(bfa);
+       u32     card_type = bfa->ioc.attr->card_type;
+
+       if (bfa_ioc_is_operational(&bfa->ioc)) {
+               if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
+                       return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+       } else {
+               if (!bfa_ioc_is_acq_addr(&bfa->ioc))
+                       return BFA_STATUS_IOC_NON_OP;
+       }
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
+{
+       struct bfi_faa_en_dis_s faa_enable_req;
+       struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
+       bfa_status_t            status;
+
+       iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+       iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+       status = bfa_faa_validate_request(bfa);
+       if (status != BFA_STATUS_OK)
+               return status;
+
+       if (iocfc->faa_args.busy == BFA_TRUE)
+               return BFA_STATUS_DEVBUSY;
+
+       if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
+               return BFA_STATUS_FAA_ENABLED;
+
+       if (bfa_fcport_is_trunk_enabled(bfa))
+               return BFA_STATUS_ERROR_TRUNK_ENABLED;
+
+       bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
+       iocfc->faa_args.busy = BFA_TRUE;
+
+       memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
+       bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
+               BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
+
+       bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
+                       sizeof(struct bfi_faa_en_dis_s));
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
+               void *cbarg)
+{
+       struct bfi_faa_en_dis_s faa_disable_req;
+       struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
+       bfa_status_t            status;
+
+       iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+       iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+       status = bfa_faa_validate_request(bfa);
+       if (status != BFA_STATUS_OK)
+               return status;
+
+       if (iocfc->faa_args.busy == BFA_TRUE)
+               return BFA_STATUS_DEVBUSY;
+
+       if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
+               return BFA_STATUS_FAA_DISABLED;
+
+       bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
+       iocfc->faa_args.busy = BFA_TRUE;
+
+       memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
+       bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
+               BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
+
+       bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
+               sizeof(struct bfi_faa_en_dis_s));
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+               bfa_cb_iocfc_t cbfn, void *cbarg)
+{
+       struct bfi_faa_query_s  faa_attr_req;
+       struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
+       bfa_status_t            status;
+
+       iocfc->faa_args.faa_attr = attr;
+       iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+       iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+       status = bfa_faa_validate_request(bfa);
+       if (status != BFA_STATUS_OK)
+               return status;
+
+       if (iocfc->faa_args.busy == BFA_TRUE)
+               return BFA_STATUS_DEVBUSY;
+
+       iocfc->faa_args.busy = BFA_TRUE;
+       memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
+       bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
+               BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
+
+       bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
+               sizeof(struct bfi_faa_query_s));
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ *     FAA enable response
+ */
+static void
+bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
+               struct bfi_faa_en_dis_rsp_s *rsp)
+{
+       void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+       bfa_status_t    status = rsp->status;
+
+       WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+       iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
+       iocfc->faa_args.busy = BFA_FALSE;
+}
+
+/*
+ *     FAA disable response
+ */
+static void
+bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
+               struct bfi_faa_en_dis_rsp_s *rsp)
+{
+       void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+       bfa_status_t    status = rsp->status;
+
+       WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+       iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
+       iocfc->faa_args.busy = BFA_FALSE;
+}
+
+/*
+ *     FAA query response
+ */
+static void
+bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
+               bfi_faa_query_rsp_t *rsp)
+{
+       void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+
+       if (iocfc->faa_args.faa_attr) {
+               iocfc->faa_args.faa_attr->faa = rsp->faa;
+               iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
+               iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
+       }
+
+       WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+       iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
+       iocfc->faa_args.busy = BFA_FALSE;
+}
+
 /*
  * IOC enable request is complete
  */
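
Illustration only, not part of this patch: the FAA enable/disable/query entry points above are asynchronous; they validate the request, mark faa_args.busy, send a mailbox command, and the reply handlers then invoke the supplied bfa_cb_iocfc_t callback. Hypothetical query usage (caller names are assumptions):

	static void
	hypothetical_faa_query_done(void *cbarg, enum bfa_status status)
	{
		struct bfa_faa_attr_s *attr = cbarg;
		/* attr->faa / faa_state / pwwn_source are valid on success */
	}

	/* caller side: the attr buffer must stay valid until the callback runs */
	status = bfa_faa_query(bfa, &faa_attr, hypothetical_faa_query_done,
			       &faa_attr);
	if (status != BFA_STATUS_OK)
		return status;	/* not issued: busy, unsupported, or IOC not ready */
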
@@ -719,15 +1023,25 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
 {
        struct bfa_s    *bfa = bfa_arg;
 
+       if (status == BFA_STATUS_FAA_ACQ_ADDR) {
+               bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
+                               bfa_iocfc_init_cb, bfa);
+               return;
+       }
+
        if (status != BFA_STATUS_OK) {
                bfa_isr_disable(bfa);
                if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
                        bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
                                     bfa_iocfc_init_cb, bfa);
+               else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
+                       bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
+                                       bfa_iocfc_enable_cb, bfa);
                return;
        }
 
        bfa_iocfc_send_cfg(bfa);
+       bfa_dconf_modinit(bfa);
 }
 
 /*
@@ -759,7 +1073,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
 {
        struct bfa_s    *bfa = bfa_arg;
 
-       bfa->rme_process = BFA_FALSE;
+       bfa->queue_process = BFA_FALSE;
 
        bfa_isr_disable(bfa);
        bfa_iocfc_disable_submod(bfa);
@@ -786,15 +1100,47 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
  * Query IOC memory requirement information.
  */
 void
-bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-                 u32 *dm_len)
+bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+                 struct bfa_s *bfa)
 {
-       /* dma memory for IOC */
-       *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
+       int q, per_reqq_sz, per_rspq_sz;
+       struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+       struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+       struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
+       u32     dm_len = 0;
+
+       /* dma memory setup for IOC */
+       bfa_mem_dma_setup(meminfo, ioc_dma,
+               BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));
+
+       /* dma memory setup for REQ/RSP queues */
+       per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
+                               BFA_DMA_ALIGN_SZ);
+       per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
+                               BFA_DMA_ALIGN_SZ);
 
-       bfa_iocfc_fw_cfg_sz(cfg, dm_len);
-       bfa_iocfc_cqs_sz(cfg, dm_len);
-       *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
+       for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
+               bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
+                               per_reqq_sz);
+               bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
+                               per_rspq_sz);
+       }
+
+       /* IOCFC dma memory - calculate Shadow CI/PI size */
+       for (q = 0; q < cfg->fwcfg.num_cqs; q++)
+               dm_len += (2 * BFA_CACHELINE_SZ);
+
+       /* IOCFC dma memory - calculate config info / rsp size */
+       dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+       dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+                       BFA_CACHELINE_SZ);
+
+       /* dma memory setup for IOCFC */
+       bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
+
+       /* kva memory setup for IOCFC */
+       bfa_mem_kva_setup(meminfo, iocfc_kva,
+                       ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
 }
 
 /*
@@ -802,7 +1148,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
  */
 void
 bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-                struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+                struct bfa_pcidev_s *pcidev)
 {
        int             i;
        struct bfa_ioc_s *ioc = &bfa->ioc;
@@ -815,17 +1161,11 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        ioc->trcmod = bfa->trcmod;
        bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
 
-       /*
-        * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
-        */
-       if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
-               bfa_ioc_set_fcmode(&bfa->ioc);
-
-       bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
+       bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
        bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
 
        bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
-       bfa_iocfc_mem_claim(bfa, cfg, meminfo);
+       bfa_iocfc_mem_claim(bfa, cfg);
        INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
 
        INIT_LIST_HEAD(&bfa->comp_q);
@@ -863,8 +1203,10 @@ bfa_iocfc_stop(struct bfa_s *bfa)
 {
        bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
 
-       bfa->rme_process = BFA_FALSE;
-       bfa_ioc_disable(&bfa->ioc);
+       bfa->queue_process = BFA_FALSE;
+       bfa_dconf_modexit(bfa);
+       if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
+               bfa_ioc_disable(&bfa->ioc);
 }
 
 void
@@ -879,12 +1221,22 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
 
        switch (msg->mh.msg_id) {
        case BFI_IOCFC_I2H_CFG_REPLY:
-               iocfc->cfg_reply = &msg->cfg_reply;
                bfa_iocfc_cfgrsp(bfa);
                break;
        case BFI_IOCFC_I2H_UPDATEQ_RSP:
                iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
                break;
+       case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
+               bfa_faa_enable_reply(iocfc,
+                       (struct bfi_faa_en_dis_rsp_s *)msg);
+               break;
+       case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
+               bfa_faa_disable_reply(iocfc,
+                       (struct bfi_faa_en_dis_rsp_s *)msg);
+               break;
+       case BFI_IOCFC_I2H_FAA_QUERY_RSP:
+               bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
+               break;
        default:
                WARN_ON(1);
        }
@@ -926,7 +1278,7 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
                return BFA_STATUS_DEVBUSY;
 
        bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
-                   bfa_lpuid(bfa));
+                   bfa_fn_lpu(bfa));
        m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
        m->delay    = iocfc->cfginfo->intr_attr.delay;
        m->latency  = iocfc->cfginfo->intr_attr.latency;
@@ -934,17 +1286,17 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
        bfa_trc(bfa, attr->delay);
        bfa_trc(bfa, attr->latency);
 
-       bfa_reqq_produce(bfa, BFA_REQQ_IOC);
+       bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
        return BFA_STATUS_OK;
 }
 
 void
-bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
+bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
 {
        struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
 
        iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
-       bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
+       bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
 }
 /*
  * Enable IOC after it is disabled.
@@ -954,6 +1306,7 @@ bfa_iocfc_enable(struct bfa_s *bfa)
 {
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Enable");
+       bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
        bfa_ioc_enable(&bfa->ioc);
 }
 
@@ -964,7 +1317,7 @@ bfa_iocfc_disable(struct bfa_s *bfa)
                     "IOC Disable");
        bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
 
-       bfa->rme_process = BFA_FALSE;
+       bfa->queue_process = BFA_FALSE;
        bfa_ioc_disable(&bfa->ioc);
 }
 
@@ -1033,33 +1386,49 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
  *                     starting address for each block and provide the same
  *                     structure as input parameter to bfa_attach() call.
  *
+ * @param[in] bfa -    pointer to the bfa structure, used while fetching the
+ *                     DMA and KVA memory information of the bfa sub-modules.
+ *
  * @return void
  *
  * Special Considerations: @note
  */
 void
-bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
+bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+               struct bfa_s *bfa)
 {
        int             i;
-       u32     km_len = 0, dm_len = 0;
+       struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
+       struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+       struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
+       struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
+       struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
+       struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
+       struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
 
        WARN_ON((cfg == NULL) || (meminfo == NULL));
 
        memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
-       meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
-               BFA_MEM_TYPE_KVA;
-       meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
-               BFA_MEM_TYPE_DMA;
 
-       bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
-
-       for (i = 0; hal_mods[i]; i++)
-               hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
+       /* Initialize the DMA & KVA meminfo queues */
+       INIT_LIST_HEAD(&meminfo->dma_info.qe);
+       INIT_LIST_HEAD(&meminfo->kva_info.qe);
 
-       dm_len += bfa_port_meminfo();
+       bfa_iocfc_meminfo(cfg, meminfo, bfa);
 
-       meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
-       meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
+       for (i = 0; hal_mods[i]; i++)
+               hal_mods[i]->meminfo(cfg, meminfo, bfa);
+
+       /* dma info setup */
+       bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
+       bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
+       bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
+       bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
+       bfa_mem_dma_setup(meminfo, flash_dma,
+                         bfa_flash_meminfo(cfg->drvcfg.min_cfg));
+       bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
+       bfa_mem_dma_setup(meminfo, phy_dma,
+                         bfa_phy_meminfo(cfg->drvcfg.min_cfg));
 }
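
With this change bfa_cfg_get_meminfo() reports memory requirements as two
lists (meminfo->dma_info.qe and meminfo->kva_info.qe) rather than two flat
lengths, and the caller is expected to satisfy every element before calling
bfa_attach(). A rough sketch of servicing the DMA list, assuming each
bfa_mem_dma_s element also carries a mem_len field filled in by
bfa_mem_dma_setup() (the function below is illustrative, not part of this
patch):

	static int
	example_alloc_dma_elems(struct device *dev, struct bfa_meminfo_s *meminfo)
	{
		struct bfa_mem_dma_s *dma_elem;
		struct list_head *qe;
		dma_addr_t pa;

		list_for_each(qe, &meminfo->dma_info.qe) {
			dma_elem = (struct bfa_mem_dma_s *) qe;
			/* mem_len is assumed to hold the size requested above */
			dma_elem->kva = dma_alloc_coherent(dev, dma_elem->mem_len,
							   &pa, GFP_KERNEL);
			if (dma_elem->kva == NULL)
				return -ENOMEM;	/* caller unwinds earlier elements */
			dma_elem->dma = pa;
		}
		return 0;
	}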
 
 /*
@@ -1092,28 +1461,46 @@ void
 bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
               struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
 {
-       int                     i;
-       struct bfa_mem_elem_s   *melem;
+       int     i;
+       struct bfa_mem_dma_s *dma_info, *dma_elem;
+       struct bfa_mem_kva_s *kva_info, *kva_elem;
+       struct list_head *dm_qe, *km_qe;
 
        bfa->fcs = BFA_FALSE;
 
        WARN_ON((cfg == NULL) || (meminfo == NULL));
 
-       /*
-        * initialize all memory pointers for iterative allocation
-        */
-       for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
-               melem = meminfo->meminfo + i;
-               melem->kva_curp = melem->kva;
-               melem->dma_curp = melem->dma;
+       /* Initialize memory pointers for iterative allocation */
+       dma_info = &meminfo->dma_info;
+       dma_info->kva_curp = dma_info->kva;
+       dma_info->dma_curp = dma_info->dma;
+
+       kva_info = &meminfo->kva_info;
+       kva_info->kva_curp = kva_info->kva;
+
+       list_for_each(dm_qe, &dma_info->qe) {
+               dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+               dma_elem->kva_curp = dma_elem->kva;
+               dma_elem->dma_curp = dma_elem->dma;
        }
 
-       bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);
+       list_for_each(km_qe, &kva_info->qe) {
+               kva_elem = (struct bfa_mem_kva_s *) km_qe;
+               kva_elem->kva_curp = kva_elem->kva;
+       }
 
-       for (i = 0; hal_mods[i]; i++)
-               hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
+       bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
 
-       bfa_com_port_attach(bfa, meminfo);
+       for (i = 0; hal_mods[i]; i++)
+               hal_mods[i]->attach(bfa, bfad, cfg, pcidev);
+
+       bfa_com_port_attach(bfa);
+       bfa_com_ablk_attach(bfa);
+       bfa_com_cee_attach(bfa);
+       bfa_com_sfp_attach(bfa);
+       bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
+       bfa_com_diag_attach(bfa);
+       bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
 }
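
Putting the new prototypes together, the bring-up order implied by this hunk
is roughly: build a configuration, let BFA populate the meminfo lists,
allocate them, then attach. A hedged sketch (the allocation step and error
handling are elided; the wrapper itself is illustrative):

	static void
	example_bfa_bringup(struct bfa_s *bfa, void *bfad,
			    struct bfa_iocfc_cfg_s *cfg,
			    struct bfa_meminfo_s *meminfo,
			    struct bfa_pcidev_s *pcidev)
	{
		bfa_cfg_get_default(cfg);		/* or bfa_cfg_get_min() */
		bfa_cfg_get_meminfo(cfg, meminfo, bfa);	/* builds dma/kva lists */
		/* ... allocate every element on the dma and kva lists ... */
		bfa_attach(bfa, bfad, cfg, meminfo, pcidev);
	}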
 
 /*
@@ -1152,10 +1539,17 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
        struct list_head                *qe;
        struct list_head                *qen;
        struct bfa_cb_qe_s      *hcb_qe;
+       bfa_cb_cbfn_status_t    cbfn;
 
        list_for_each_safe(qe, qen, comp_q) {
                hcb_qe = (struct bfa_cb_qe_s *) qe;
-               hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+               if (hcb_qe->pre_rmv) {
+                       /* qe is invalid after return, dequeue before cbfn() */
+                       list_del(qe);
+                       cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
+                       cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+               } else
+                       hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
        }
 }
 
@@ -1168,10 +1562,20 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
        while (!list_empty(comp_q)) {
                bfa_q_deq(comp_q, &qe);
                hcb_qe = (struct bfa_cb_qe_s *) qe;
+               WARN_ON(hcb_qe->pre_rmv);
                hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
        }
 }
 
+void
+bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
+{
+       if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
+               if (bfa->iocfc.cfgdone == BFA_TRUE)
+                       bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
+                               bfa_iocfc_init_cb, bfa);
+       }
+}
 
 /*
  * Return the list of PCI vendor/device id lists supported by this
@@ -1215,6 +1619,7 @@ bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
        cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
        cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
        cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
+       cfg->fwcfg.num_fwtio_reqs = 0;
 
        cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
        cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
@@ -1236,6 +1641,7 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
        cfg->fwcfg.num_fcxp_reqs   = BFA_FCXP_MIN;
        cfg->fwcfg.num_uf_bufs     = BFA_UF_MIN;
        cfg->fwcfg.num_rports      = BFA_RPORT_MIN;
+       cfg->fwcfg.num_fwtio_reqs = 0;
 
        cfg->drvcfg.num_sgpgs      = BFA_SGPG_MIN;
        cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
index d85f93aea4650b670ecbbe171c303042c04fd8d8..7b3d235d20b4638fb62d270b5a5a37980230055e 100644 (file)
@@ -40,7 +40,12 @@ enum {
        BFA_MFG_TYPE_ASTRA    = 807,     /*  Astra mezz card            */
        BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*  Lightning mezz card - old  */
        BFA_MFG_TYPE_LIGHTNING = 1741,   /*  Lightning mezz card        */
-       BFA_MFG_TYPE_INVALID = 0,        /*  Invalid card type          */
+       BFA_MFG_TYPE_PROWLER_F = 1560,   /*  Prowler FC only cards      */
+       BFA_MFG_TYPE_PROWLER_N = 1410,   /*  Prowler NIC only cards     */
+       BFA_MFG_TYPE_PROWLER_C = 1710,   /*  Prowler CNA only cards     */
+       BFA_MFG_TYPE_PROWLER_D = 1860,   /*  Prowler Dual cards         */
+       BFA_MFG_TYPE_CHINOOK   = 1867,   /*  Chinook cards              */
+       BFA_MFG_TYPE_INVALID = 0,        /*  Invalid card type          */
 };
 
 #pragma pack(1)
@@ -53,7 +58,8 @@ enum {
        (type) == BFA_MFG_TYPE_WANCHESE || \
        (type) == BFA_MFG_TYPE_ASTRA || \
        (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
-       (type) == BFA_MFG_TYPE_LIGHTNING))
+       (type) == BFA_MFG_TYPE_LIGHTNING || \
+       (type) == BFA_MFG_TYPE_CHINOOK))
 
 /*
  * Check if the card having old wwn/mac handling
@@ -124,30 +130,60 @@ enum bfa_status {
        BFA_STATUS_ETIMER       = 5,    /*  Timer expired - Retry, if persists,
                                         *  contact support */
        BFA_STATUS_EPROTOCOL    = 6,    /*  Protocol error */
+       BFA_STATUS_SFP_UNSUPP   = 10,   /*  Unsupported SFP - Replace SFP */
+       BFA_STATUS_UNKNOWN_VFID = 11,   /*  VF_ID not found */
+       BFA_STATUS_DATACORRUPTED = 12,  /*  Diag returned data corrupted */
        BFA_STATUS_DEVBUSY      = 13,   /*  Device busy - Retry operation */
+	BFA_STATUS_HDMA_FAILED  = 16,	/* Host DMA failed; contact support */
+       BFA_STATUS_FLASH_BAD_LEN = 17,  /*  Flash bad length */
        BFA_STATUS_UNKNOWN_LWWN = 18,   /*  LPORT PWWN not found */
        BFA_STATUS_UNKNOWN_RWWN = 19,   /*  RPORT PWWN not found */
        BFA_STATUS_VPORT_EXISTS = 21,   /*  VPORT already exists */
        BFA_STATUS_VPORT_MAX    = 22,   /*  Reached max VPORT supported limit */
        BFA_STATUS_UNSUPP_SPEED = 23,   /*  Invalid Speed Check speed setting */
        BFA_STATUS_INVLD_DFSZ   = 24,   /*  Invalid Max data field size */
+       BFA_STATUS_CMD_NOTSUPP  = 26,   /*  Command/API not supported */
        BFA_STATUS_FABRIC_RJT   = 29,   /*  Reject from attached fabric */
+       BFA_STATUS_UNKNOWN_VWWN = 30,   /*  VPORT PWWN not found */
+       BFA_STATUS_PORT_OFFLINE = 34,   /*  Port is not online */
        BFA_STATUS_VPORT_WWN_BP = 46,   /*  WWN is same as base port's WWN */
+	BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled; disable port first */
        BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */
        BFA_STATUS_IOC_FAILURE  = 56,   /* IOC failure - Retry, if persists
                                         * contact support */
        BFA_STATUS_INVALID_WWN  = 57,   /*  Invalid WWN */
+       BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled */
+       BFA_STATUS_IOC_NON_OP   = 61,   /* IOC is not operational */
+       BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version mismatch */
        BFA_STATUS_DIAG_BUSY    = 71,   /*  diag busy */
+       BFA_STATUS_BEACON_ON    = 72,   /* Port Beacon already on */
        BFA_STATUS_ENOFSAVE     = 78,   /*  No saved firmware trace */
        BFA_STATUS_IOC_DISABLED = 82,   /* IOC is already disabled */
+	BFA_STATUS_NO_SFP_DEV = 89,	/* No SFP device; check or replace SFP */
+	BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed; contact support */
+       BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
        BFA_STATUS_INVALID_MAC  = 134, /*  Invalid MAC address */
        BFA_STATUS_PBC          = 154, /*  Operation not allowed for pre-boot
                                        *  configuration */
+       BFA_STATUS_BAD_FWCFG = 156,     /* Bad firmware configuration */
+       BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
+       BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
        BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
                                         * this adapter */
        BFA_STATUS_TRUNK_DISABLED  = 165, /* Trunking is disabled on
                                           * the adapter */
        BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
+       BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
+       BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
+       BFA_STATUS_ENTRY_EXISTS = 193,  /* Entry already exists */
+       BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
+       BFA_STATUS_NO_CHANGE = 195,     /* Feature already in that state */
+       BFA_STATUS_FAA_ENABLED = 197,   /* FAA is already enabled */
+       BFA_STATUS_FAA_DISABLED = 198,  /* FAA is already disabled */
+       BFA_STATUS_FAA_ACQUIRED = 199,  /* FAA is already acquired */
+       BFA_STATUS_FAA_ACQ_ADDR = 200,  /* Acquiring addr */
+       BFA_STATUS_ERROR_TRUNK_ENABLED = 203,   /* Trunk enabled on adapter */
+       BFA_STATUS_MAX_ENTRY_REACHED = 212,     /* MAX entry reached */
        BFA_STATUS_MAX_VAL              /* Unknown error code */
 };
 #define bfa_status_t enum bfa_status
@@ -265,6 +301,8 @@ enum bfa_ioc_state {
        BFA_IOC_DISABLED        = 10,   /*  IOC is disabled */
        BFA_IOC_FWMISMATCH      = 11,   /*  IOC f/w different from drivers */
        BFA_IOC_ENABLING        = 12,   /*  IOC is being enabled */
+       BFA_IOC_HWFAIL          = 13,   /*  PCI mapping doesn't exist */
+       BFA_IOC_ACQ_ADDR        = 14,   /*  Acquiring addr from fabric */
 };
 
 /*
@@ -294,6 +332,7 @@ struct bfa_ioc_drv_stats_s {
        u32     enable_reqs;
        u32     disable_replies;
        u32     enable_replies;
+       u32     rsvd;
 };
 
 /*
@@ -320,7 +359,143 @@ struct bfa_ioc_attr_s {
        struct bfa_ioc_driver_attr_s    driver_attr;    /*  driver attr    */
        struct bfa_ioc_pci_attr_s       pci_attr;
        u8                              port_id;        /*  port number    */
-       u8                              rsvd[7];        /*  64bit align    */
+       u8                              port_mode;      /*  bfa_mode_s  */
+       u8                              cap_bm;         /*  capability  */
+       u8                              port_mode_cfg;  /*  bfa_mode_s  */
+       u8                              rsvd[4];        /*  64bit align */
+};
+
+/*
+ *                     AEN related definitions
+ */
+enum bfa_aen_category {
+       BFA_AEN_CAT_ADAPTER     = 1,
+       BFA_AEN_CAT_PORT        = 2,
+       BFA_AEN_CAT_LPORT       = 3,
+       BFA_AEN_CAT_RPORT       = 4,
+       BFA_AEN_CAT_ITNIM       = 5,
+       BFA_AEN_CAT_AUDIT       = 8,
+       BFA_AEN_CAT_IOC         = 9,
+};
+
+/* BFA adapter level events */
+enum bfa_adapter_aen_event {
+       BFA_ADAPTER_AEN_ADD     = 1,    /* New Adapter found event */
+       BFA_ADAPTER_AEN_REMOVE  = 2,    /* Adapter removed event */
+};
+
+struct bfa_adapter_aen_data_s {
+       char    serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+       u32     nports; /* Number of NPorts */
+       wwn_t   pwwn;   /* WWN of one of its physical port */
+};
+
+/* BFA physical port Level events */
+enum bfa_port_aen_event {
+       BFA_PORT_AEN_ONLINE     = 1,    /* Physical Port online event */
+       BFA_PORT_AEN_OFFLINE    = 2,    /* Physical Port offline event */
+       BFA_PORT_AEN_RLIR       = 3,    /* RLIR event, not supported */
+       BFA_PORT_AEN_SFP_INSERT = 4,    /* SFP inserted event */
+       BFA_PORT_AEN_SFP_REMOVE = 5,    /* SFP removed event */
+       BFA_PORT_AEN_SFP_POM    = 6,    /* SFP POM event */
+       BFA_PORT_AEN_ENABLE     = 7,    /* Physical Port enable event */
+       BFA_PORT_AEN_DISABLE    = 8,    /* Physical Port disable event */
+       BFA_PORT_AEN_AUTH_ON    = 9,    /* Physical Port auth success event */
+       BFA_PORT_AEN_AUTH_OFF   = 10,   /* Physical Port auth fail event */
+       BFA_PORT_AEN_DISCONNECT = 11,   /* Physical Port disconnect event */
+       BFA_PORT_AEN_QOS_NEG    = 12,   /* Base Port QOS negotiation event */
+       BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */
+       BFA_PORT_AEN_SFP_ACCESS_ERROR   = 14, /* SFP read error event */
+       BFA_PORT_AEN_SFP_UNSUPPORT      = 15, /* Unsupported SFP event */
+};
+
+enum bfa_port_aen_sfp_pom {
+       BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
+       BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
+       BFA_PORT_AEN_SFP_POM_RED   = 3, /* Critical */
+       BFA_PORT_AEN_SFP_POM_MAX   = BFA_PORT_AEN_SFP_POM_RED
+};
+
+struct bfa_port_aen_data_s {
+       wwn_t           pwwn;           /* WWN of the physical port */
+       wwn_t           fwwn;           /* WWN of the fabric port */
+       u32             phy_port_num;   /* For SFP related events */
+       u16             ioc_type;
+       u16             level;          /* Only transitions will be informed */
+       mac_t           mac;            /* MAC address of the ethernet port */
+       u16             rsvd;
+};
+
+/* BFA AEN logical port events */
+enum bfa_lport_aen_event {
+       BFA_LPORT_AEN_NEW       = 1,            /* LPort created event */
+       BFA_LPORT_AEN_DELETE    = 2,            /* LPort deleted event */
+       BFA_LPORT_AEN_ONLINE    = 3,            /* LPort online event */
+       BFA_LPORT_AEN_OFFLINE   = 4,            /* LPort offline event */
+       BFA_LPORT_AEN_DISCONNECT = 5,           /* LPort disconnect event */
+       BFA_LPORT_AEN_NEW_PROP  = 6,            /* VPort created event */
+       BFA_LPORT_AEN_DELETE_PROP = 7,          /* VPort deleted event */
+       BFA_LPORT_AEN_NEW_STANDARD = 8,         /* VPort created event */
+       BFA_LPORT_AEN_DELETE_STANDARD = 9,      /* VPort deleted event */
+       BFA_LPORT_AEN_NPIV_DUP_WWN = 10,        /* VPort with duplicate WWN */
+       BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11,     /* Max NPIV in fabric/fport */
+       BFA_LPORT_AEN_NPIV_UNKNOWN = 12,        /* Unknown NPIV Error code */
+};
+
+struct bfa_lport_aen_data_s {
+       u16     vf_id;  /* vf_id of this logical port */
+	u16	roles;	/* Logical port mode, IM/TM/IP etc. */
+       u32     rsvd;
+       wwn_t   ppwwn;  /* WWN of its physical port */
+       wwn_t   lpwwn;  /* WWN of this logical port */
+};
+
+/* BFA ITNIM events */
+enum bfa_itnim_aen_event {
+       BFA_ITNIM_AEN_ONLINE     = 1,   /* Target online */
+       BFA_ITNIM_AEN_OFFLINE    = 2,   /* Target offline */
+       BFA_ITNIM_AEN_DISCONNECT = 3,   /* Target disconnected */
+};
+
+struct bfa_itnim_aen_data_s {
+       u16             vf_id;          /* vf_id of the IT nexus */
+       u16             rsvd[3];
+       wwn_t           ppwwn;          /* WWN of its physical port */
+       wwn_t           lpwwn;          /* WWN of logical port */
+       wwn_t           rpwwn;          /* WWN of remote(target) port */
+};
+
+/* BFA audit events */
+enum bfa_audit_aen_event {
+       BFA_AUDIT_AEN_AUTH_ENABLE       = 1,
+       BFA_AUDIT_AEN_AUTH_DISABLE      = 2,
+       BFA_AUDIT_AEN_FLASH_ERASE       = 3,
+       BFA_AUDIT_AEN_FLASH_UPDATE      = 4,
+};
+
+struct bfa_audit_aen_data_s {
+       wwn_t   pwwn;
+       int     partition_inst;
+       int     partition_type;
+};
+
+/* BFA IOC level events */
+enum bfa_ioc_aen_event {
+       BFA_IOC_AEN_HBGOOD  = 1,        /* Heart Beat restore event     */
+       BFA_IOC_AEN_HBFAIL  = 2,        /* Heart Beat failure event     */
+       BFA_IOC_AEN_ENABLE  = 3,        /* IOC enabled event            */
+       BFA_IOC_AEN_DISABLE = 4,        /* IOC disabled event           */
+       BFA_IOC_AEN_FWMISMATCH  = 5,    /* IOC firmware mismatch        */
+       BFA_IOC_AEN_FWCFG_ERROR = 6,    /* IOC firmware config error    */
+       BFA_IOC_AEN_INVALID_VENDOR = 7,
+       BFA_IOC_AEN_INVALID_NWWN = 8,   /* Zero NWWN                    */
+       BFA_IOC_AEN_INVALID_PWWN = 9    /* Zero PWWN                    */
+};
+
+struct bfa_ioc_aen_data_s {
+       wwn_t   pwwn;
+       u16     ioc_type;
+       mac_t   mac;
 };
 
 /*
@@ -337,6 +512,21 @@ struct bfa_ioc_attr_s {
 #define BFA_MFG_SUPPLIER_PARTNUM_SIZE          20
 #define BFA_MFG_SUPPLIER_SERIALNUM_SIZE                20
 #define BFA_MFG_SUPPLIER_REVISION_SIZE         4
+/*
+ * Initial capability definition
+ */
+#define BFA_MFG_IC_FC  0x01
+#define BFA_MFG_IC_ETH 0x02
+
+/*
+ * Adapter capability mask definition
+ */
+#define BFA_CM_HBA     0x01
+#define BFA_CM_CNA     0x02
+#define BFA_CM_NIC     0x04
+#define BFA_CM_FC16G   0x08
+#define BFA_CM_SRIOV   0x10
+#define BFA_CM_MEZZ    0x20
 
 #pragma pack(1)
 
@@ -344,31 +534,39 @@ struct bfa_ioc_attr_s {
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_block_s {
-       u8              version;        /*  manufacturing block version */
-       u8              mfg_sig[3];     /*  characters 'M', 'F', 'G' */
-       u16     mfgsize;        /*  mfg block size */
-       u16     u16_chksum;     /*  old u16 checksum */
-       char            brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
-       char            brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
-       u8              mfg_day;        /*  manufacturing day */
-       u8              mfg_month;      /*  manufacturing month */
-       u16     mfg_year;       /*  manufacturing year */
-       wwn_t           mfg_wwn;        /*  wwn base for this adapter */
-       u8              num_wwn;        /*  number of wwns assigned */
-       u8              mfg_speeds;     /*  speeds allowed for this adapter */
-       u8              rsv[2];
-       char            supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
-       char            supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
-       char
-               supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
-       char
-               supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
-       mac_t           mfg_mac;        /*  mac address */
-       u8              num_mac;        /*  number of mac addresses */
-       u8              rsv2;
-       u32     mfg_type;       /*  card type */
-       u8              rsv3[108];
-       u8              md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*  md5 checksum */
+       u8      version;    /*!< manufacturing block version */
+       u8     mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
+       u16    mfgsize;    /*!< mfg block size */
+       u16    u16_chksum; /*!< old u16 checksum */
+       char        brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+       char        brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+       u8     mfg_day;    /*!< manufacturing day */
+       u8     mfg_month;  /*!< manufacturing month */
+       u16    mfg_year;   /*!< manufacturing year */
+       wwn_t       mfg_wwn;    /*!< wwn base for this adapter */
+       u8     num_wwn;    /*!< number of wwns assigned */
+       u8     mfg_speeds; /*!< speeds allowed for this adapter */
+       u8     rsv[2];
+       char    supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+       char    supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+       char    supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+       char    supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+       mac_t       mfg_mac;    /*!< base mac address */
+       u8     num_mac;    /*!< number of mac addresses */
+       u8     rsv2;
+       u32    card_type;  /*!< card type          */
+       char        cap_nic;    /*!< capability nic     */
+       char        cap_cna;    /*!< capability cna     */
+       char        cap_hba;    /*!< capability hba     */
+       char        cap_fc16g;  /*!< capability fc 16g      */
+       char        cap_sriov;  /*!< capability sriov       */
+       char        cap_mezz;   /*!< capability mezz        */
+       u8     rsv3;
+       u8     mfg_nports; /*!< number of ports        */
+       char        media[8];   /*!< xfi/xaui           */
+       char        initial_mode[8]; /*!< initial mode: hba/cna/nic */
+       u8     rsv4[84];
+       u8     md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
 };
 
 #pragma pack()
@@ -386,17 +584,27 @@ enum {
        BFA_PCI_DEVICE_ID_FC_8G1P       = 0x17,
        BFA_PCI_DEVICE_ID_CT            = 0x14,
        BFA_PCI_DEVICE_ID_CT_FC         = 0x21,
+       BFA_PCI_DEVICE_ID_CT2           = 0x22,
 };
 
-#define bfa_asic_id_ct(devid)                  \
-       ((devid) == BFA_PCI_DEVICE_ID_CT ||     \
-        (devid) == BFA_PCI_DEVICE_ID_CT_FC)
+#define bfa_asic_id_cb(__d)                    \
+       ((__d) == BFA_PCI_DEVICE_ID_FC_8G2P ||  \
+        (__d) == BFA_PCI_DEVICE_ID_FC_8G1P)
+#define bfa_asic_id_ct(__d)                    \
+       ((__d) == BFA_PCI_DEVICE_ID_CT ||       \
+        (__d) == BFA_PCI_DEVICE_ID_CT_FC)
+#define bfa_asic_id_ct2(__d)   ((__d) == BFA_PCI_DEVICE_ID_CT2)
+#define bfa_asic_id_ctc(__d)   \
+       (bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d))
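
The helpers above classify a PCI device id by ASIC family: cb, ct, ct2, or
either ct generation via bfa_asic_id_ctc(). A small illustrative use (the
function and returned strings are examples only):

	static const char *
	example_asic_family(u16 device_id)
	{
		if (bfa_asic_id_cb(device_id))
			return "cb";
		if (bfa_asic_id_ct2(device_id))
			return "ct2";
		if (bfa_asic_id_ct(device_id))
			return "ct";
		return "unknown";
	}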
 
 /*
  * PCI sub-system device and vendor ID information
  */
 enum {
        BFA_PCI_FCOE_SSDEVICE_ID        = 0x14,
+       BFA_PCI_CT2_SSID_FCoE           = 0x22,
+       BFA_PCI_CT2_SSID_ETH            = 0x23,
+       BFA_PCI_CT2_SSID_FC             = 0x24,
 };
 
 /*
@@ -416,9 +624,7 @@ enum bfa_port_speed {
        BFA_PORT_SPEED_8GBPS    = 8,
        BFA_PORT_SPEED_10GBPS   = 10,
        BFA_PORT_SPEED_16GBPS   = 16,
-       BFA_PORT_SPEED_AUTO =
-               (BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS |
-                BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS),
+       BFA_PORT_SPEED_AUTO     = 0xf,
 };
 #define bfa_port_speed_t enum bfa_port_speed
 
@@ -454,6 +660,20 @@ struct bfa_boot_bootlun_s {
 /*
  * BOOT boot configuraton
  */
+struct bfa_boot_cfg_s {
+       u8              version;
+       u8              rsvd1;
+       u16             chksum;
+       u8              enable;         /* enable/disable SAN boot */
+       u8              speed;          /* boot speed settings */
+       u8              topology;       /* boot topology setting */
+       u8              bootopt;        /* bfa_boot_bootopt_t */
+       u32             nbluns;         /* number of boot luns */
+       u32             rsvd2;
+       struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
+       struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
+};
+
 struct bfa_boot_pbc_s {
        u8              enable;         /*  enable/disable SAN boot */
        u8              speed;          /*  boot speed settings */
@@ -463,4 +683,470 @@ struct bfa_boot_pbc_s {
        struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
 };
 
+struct bfa_ethboot_cfg_s {
+       u8              version;
+       u8              rsvd1;
+       u16             chksum;
+       u8              enable; /* enable/disable Eth/PXE boot */
+       u8              rsvd2;
+       u16             vlan;
+};
+
+/*
+ * ASIC block configuration related structures
+ */
+#define BFA_ABLK_MAX_PORTS     2
+#define BFA_ABLK_MAX_PFS       16
+#define BFA_ABLK_MAX           2
+
+#pragma pack(1)
+enum bfa_mode_s {
+       BFA_MODE_HBA    = 1,
+       BFA_MODE_CNA    = 2,
+       BFA_MODE_NIC    = 3
+};
+
+struct bfa_adapter_cfg_mode_s {
+       u16     max_pf;
+       u16     max_vf;
+       enum bfa_mode_s mode;
+};
+
+struct bfa_ablk_cfg_pf_s {
+       u16     pers;
+       u8      port_id;
+       u8      optrom;
+       u8      valid;
+       u8      sriov;
+       u8      max_vfs;
+       u8      rsvd[1];
+       u16     num_qpairs;
+       u16     num_vectors;
+       u32     bw;
+};
+
+struct bfa_ablk_cfg_port_s {
+       u8      mode;
+       u8      type;
+       u8      max_pfs;
+       u8      rsvd[5];
+};
+
+struct bfa_ablk_cfg_inst_s {
+       u8      nports;
+       u8      max_pfs;
+       u8      rsvd[6];
+       struct bfa_ablk_cfg_pf_s        pf_cfg[BFA_ABLK_MAX_PFS];
+       struct bfa_ablk_cfg_port_s      port_cfg[BFA_ABLK_MAX_PORTS];
+};
+
+struct bfa_ablk_cfg_s {
+       struct bfa_ablk_cfg_inst_s      inst[BFA_ABLK_MAX];
+};
+
+
+/*
+ *     SFP module specific
+ */
+#define SFP_DIAGMON_SIZE       10 /* num bytes of diag monitor data */
+
+/* SFP state change notification event */
+#define BFA_SFP_SCN_REMOVED    0
+#define BFA_SFP_SCN_INSERTED   1
+#define BFA_SFP_SCN_POM                2
+#define BFA_SFP_SCN_FAILED     3
+#define BFA_SFP_SCN_UNSUPPORT  4
+#define BFA_SFP_SCN_VALID      5
+
+enum bfa_defs_sfp_media_e {
+       BFA_SFP_MEDIA_UNKNOWN   = 0x00,
+       BFA_SFP_MEDIA_CU        = 0x01,
+       BFA_SFP_MEDIA_LW        = 0x02,
+       BFA_SFP_MEDIA_SW        = 0x03,
+       BFA_SFP_MEDIA_EL        = 0x04,
+       BFA_SFP_MEDIA_UNSUPPORT = 0x05,
+};
+
+/*
+ * values for xmtr_tech above
+ */
+enum {
+       SFP_XMTR_TECH_CU = (1 << 0),    /* copper FC-BaseT */
+       SFP_XMTR_TECH_CP = (1 << 1),    /* copper passive */
+       SFP_XMTR_TECH_CA = (1 << 2),    /* copper active */
+       SFP_XMTR_TECH_LL = (1 << 3),    /* longwave laser */
+       SFP_XMTR_TECH_SL = (1 << 4),    /* shortwave laser w/ OFC */
+       SFP_XMTR_TECH_SN = (1 << 5),    /* shortwave laser w/o OFC */
+       SFP_XMTR_TECH_EL_INTRA = (1 << 6), /* elec intra-enclosure */
+       SFP_XMTR_TECH_EL_INTER = (1 << 7), /* elec inter-enclosure */
+       SFP_XMTR_TECH_LC = (1 << 8),    /* longwave laser */
+       SFP_XMTR_TECH_SA = (1 << 9)
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Basic ID field total 64 bytes
+ */
+struct sfp_srlid_base_s {
+       u8      id;             /* 00: Identifier */
+       u8      extid;          /* 01: Extended Identifier */
+       u8      connector;      /* 02: Connector */
+       u8      xcvr[8];        /* 03-10: Transceiver */
+       u8      encoding;       /* 11: Encoding */
+       u8      br_norm;        /* 12: BR, Nominal */
+       u8      rate_id;        /* 13: Rate Identifier */
+       u8      len_km;         /* 14: Length single mode km */
+       u8      len_100m;       /* 15: Length single mode 100m */
+       u8      len_om2;        /* 16: Length om2 fiber 10m */
+       u8      len_om1;        /* 17: Length om1 fiber 10m */
+       u8      len_cu;         /* 18: Length copper 1m */
+       u8      len_om3;        /* 19: Length om3 fiber 10m */
+       u8      vendor_name[16];/* 20-35 */
+       u8      unalloc1;
+       u8      vendor_oui[3];  /* 37-39 */
+       u8      vendor_pn[16];  /* 40-55 */
+       u8      vendor_rev[4];  /* 56-59 */
+       u8      wavelen[2];     /* 60-61 */
+       u8      unalloc2;
+       u8      cc_base;        /* 63: check code for base id field */
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Extended id field total 32 bytes
+ */
+struct sfp_srlid_ext_s {
+       u8      options[2];
+       u8      br_max;
+       u8      br_min;
+       u8      vendor_sn[16];
+       u8      date_code[8];
+       u8      diag_mon_type;  /* 92: Diagnostic Monitoring type */
+       u8      en_options;
+       u8      sff_8472;
+       u8      cc_ext;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status base field total 96 bytes
+ */
+struct sfp_diag_base_s {
+       /*
+        * Alarm and warning Thresholds 40 bytes
+        */
+       u8      temp_high_alarm[2]; /* 00-01 */
+       u8      temp_low_alarm[2];  /* 02-03 */
+       u8      temp_high_warning[2];   /* 04-05 */
+       u8      temp_low_warning[2];    /* 06-07 */
+
+       u8      volt_high_alarm[2]; /* 08-09 */
+       u8      volt_low_alarm[2];  /* 10-11 */
+       u8      volt_high_warning[2];   /* 12-13 */
+       u8      volt_low_warning[2];    /* 14-15 */
+
+       u8      bias_high_alarm[2]; /* 16-17 */
+       u8      bias_low_alarm[2];  /* 18-19 */
+       u8      bias_high_warning[2];   /* 20-21 */
+       u8      bias_low_warning[2];    /* 22-23 */
+
+       u8      tx_pwr_high_alarm[2];   /* 24-25 */
+       u8      tx_pwr_low_alarm[2];    /* 26-27 */
+       u8      tx_pwr_high_warning[2]; /* 28-29 */
+       u8      tx_pwr_low_warning[2];  /* 30-31 */
+
+       u8      rx_pwr_high_alarm[2];   /* 32-33 */
+       u8      rx_pwr_low_alarm[2];    /* 34-35 */
+       u8      rx_pwr_high_warning[2]; /* 36-37 */
+       u8      rx_pwr_low_warning[2];  /* 38-39 */
+
+       u8      unallocate_1[16];
+
+       /*
+        * ext_cal_const[36]
+        */
+       u8      rx_pwr[20];
+       u8      tx_i[4];
+       u8      tx_pwr[4];
+       u8      temp[4];
+       u8      volt[4];
+       u8      unallocate_2[3];
+       u8      cc_dmi;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status extended field total 24 bytes
+ */
+struct sfp_diag_ext_s {
+       u8      diag[SFP_DIAGMON_SIZE];
+       u8      unalloc1[4];
+       u8      status_ctl;
+       u8      rsvd;
+       u8      alarm_flags[2];
+       u8      unalloc2[2];
+       u8      warning_flags[2];
+       u8      ext_status_ctl[2];
+};
+
+struct sfp_mem_s {
+       struct sfp_srlid_base_s srlid_base;
+       struct sfp_srlid_ext_s  srlid_ext;
+       struct sfp_diag_base_s  diag_base;
+       struct sfp_diag_ext_s   diag_ext;
+};
+
+/*
+ * transceiver codes (SFF-8472 Rev 10.2 Table 3.5)
+ */
+union sfp_xcvr_e10g_code_u {
+       u8              b;
+       struct {
+#ifdef __BIGENDIAN
+               u8      e10g_unall:1;   /* 10G Ethernet compliance */
+               u8      e10g_lrm:1;
+               u8      e10g_lr:1;
+               u8      e10g_sr:1;
+               u8      ib_sx:1;    /* Infiniband compliance */
+               u8      ib_lx:1;
+               u8      ib_cu_a:1;
+               u8      ib_cu_p:1;
+#else
+               u8      ib_cu_p:1;
+               u8      ib_cu_a:1;
+               u8      ib_lx:1;
+               u8      ib_sx:1;    /* Infiniband compliance */
+               u8      e10g_sr:1;
+               u8      e10g_lr:1;
+               u8      e10g_lrm:1;
+               u8      e10g_unall:1;   /* 10G Ethernet compliance */
+#endif
+       } r;
+};
+
+union sfp_xcvr_so1_code_u {
+       u8              b;
+       struct {
+               u8      escon:2;    /* ESCON compliance code */
+               u8      oc192_reach:1;  /* SONET compliance code */
+               u8      so_reach:2;
+               u8      oc48_reach:3;
+       } r;
+};
+
+union sfp_xcvr_so2_code_u {
+       u8              b;
+       struct {
+               u8      reserved:1;
+               u8      oc12_reach:3;   /* OC12 reach */
+               u8      reserved1:1;
+               u8      oc3_reach:3;    /* OC3 reach */
+       } r;
+};
+
+union sfp_xcvr_eth_code_u {
+       u8              b;
+       struct {
+               u8      base_px:1;
+               u8      base_bx10:1;
+               u8      e100base_fx:1;
+               u8      e100base_lx:1;
+               u8      e1000base_t:1;
+               u8      e1000base_cx:1;
+               u8      e1000base_lx:1;
+               u8      e1000base_sx:1;
+       } r;
+};
+
+struct sfp_xcvr_fc1_code_s {
+       u8      link_len:5; /* FC link length */
+       u8      xmtr_tech2:3;
+       u8      xmtr_tech1:7;   /* FC transmitter technology */
+       u8      reserved1:1;
+};
+
+union sfp_xcvr_fc2_code_u {
+       u8              b;
+       struct {
+               u8      tw_media:1; /* twin axial pair (tw) */
+               u8      tp_media:1; /* shielded twisted pair (sp) */
+               u8      mi_media:1; /* miniature coax (mi) */
+               u8      tv_media:1; /* video coax (tv) */
+		u8	m6_media:1; /* multimode, 62.5um (m6) */
+		u8	m5_media:1; /* multimode, 50um (m5) */
+               u8      reserved:1;
+               u8      sm_media:1; /* single mode (sm) */
+       } r;
+};
+
+union sfp_xcvr_fc3_code_u {
+       u8              b;
+       struct {
+#ifdef __BIGENDIAN
+               u8      rsv4:1;
+               u8      mb800:1;    /* 800 Mbytes/sec */
+               u8      mb1600:1;   /* 1600 Mbytes/sec */
+               u8      mb400:1;    /* 400 Mbytes/sec */
+               u8      rsv2:1;
+               u8      mb200:1;    /* 200 Mbytes/sec */
+               u8      rsv1:1;
+               u8      mb100:1;    /* 100 Mbytes/sec */
+#else
+               u8      mb100:1;    /* 100 Mbytes/sec */
+               u8      rsv1:1;
+               u8      mb200:1;    /* 200 Mbytes/sec */
+               u8      rsv2:1;
+               u8      mb400:1;    /* 400 Mbytes/sec */
+               u8      mb1600:1;   /* 1600 Mbytes/sec */
+               u8      mb800:1;    /* 800 Mbytes/sec */
+               u8      rsv4:1;
+#endif
+       } r;
+};
+
+struct sfp_xcvr_s {
+       union sfp_xcvr_e10g_code_u      e10g;
+       union sfp_xcvr_so1_code_u       so1;
+       union sfp_xcvr_so2_code_u       so2;
+       union sfp_xcvr_eth_code_u       eth;
+       struct sfp_xcvr_fc1_code_s      fc1;
+       union sfp_xcvr_fc2_code_u       fc2;
+       union sfp_xcvr_fc3_code_u       fc3;
+};
+
+/*
+ *     Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE      32      /* partition entry size */
+#define BFA_FLASH_PART_MAX             32      /* maximal # of partitions */
+
+enum bfa_flash_part_type {
+       BFA_FLASH_PART_OPTROM   = 1,    /* option rom partition */
+       BFA_FLASH_PART_FWIMG    = 2,    /* firmware image partition */
+       BFA_FLASH_PART_FWCFG    = 3,    /* firmware tuneable config */
+       BFA_FLASH_PART_DRV      = 4,    /* IOC driver config */
+       BFA_FLASH_PART_BOOT     = 5,    /* boot config */
+       BFA_FLASH_PART_ASIC     = 6,    /* asic bootstrap configuration */
+       BFA_FLASH_PART_MFG      = 7,    /* manufacturing block partition */
+       BFA_FLASH_PART_OPTROM2  = 8,    /* 2nd option rom partition */
+       BFA_FLASH_PART_VPD      = 9,    /* vpd data of OEM info */
+       BFA_FLASH_PART_PBC      = 10,   /* pre-boot config */
+       BFA_FLASH_PART_BOOTOVL  = 11,   /* boot overlay partition */
+       BFA_FLASH_PART_LOG      = 12,   /* firmware log partition */
+       BFA_FLASH_PART_PXECFG   = 13,   /* pxe boot config partition */
+       BFA_FLASH_PART_PXEOVL   = 14,   /* pxe boot overlay partition */
+       BFA_FLASH_PART_PORTCFG  = 15,   /* port cfg partition */
+       BFA_FLASH_PART_ASICBK   = 16,   /* asic backup partition */
+};
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr_s {
+       u32     part_type;      /* partition type */
+       u32     part_instance;  /* partition instance */
+       u32     part_off;       /* partition offset */
+       u32     part_size;      /* partition size */
+       u32     part_len;       /* partition content length */
+       u32     part_status;    /* partition status */
+       char    rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr_s {
+       u32     status; /* flash overall status */
+       u32     npart;  /* num of partitions */
+       struct bfa_flash_part_attr_s part[BFA_FLASH_PART_MAX];
+};
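
A flash query fills bfa_flash_attr_s with npart valid entries in part[]. A
lookup over that table might look like the sketch below (illustrative only;
any byte-order conversion of firmware-supplied fields is omitted):

	static struct bfa_flash_part_attr_s *
	example_find_flash_part(struct bfa_flash_attr_s *attr, u32 part_type)
	{
		u32 i;

		for (i = 0; i < attr->npart && i < BFA_FLASH_PART_MAX; i++) {
			if (attr->part[i].part_type == part_type)
				return &attr->part[i];
		}
		return NULL;
	}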
+
+/*
+ *     DIAG module specific
+ */
+#define LB_PATTERN_DEFAULT     0xB5B5B5B5
+#define QTEST_CNT_DEFAULT      10
+#define QTEST_PAT_DEFAULT      LB_PATTERN_DEFAULT
+
+struct bfa_diag_memtest_s {
+       u8      algo;
+       u8      rsvd[7];
+};
+
+struct bfa_diag_memtest_result {
+       u32     status;
+       u32     addr;
+	u32	exp; /* expected value read from reg */
+	u32	act; /* actual value read */
+       u32     err_status;             /* error status reg */
+       u32     err_status1;    /* extra error info reg */
+       u32     err_addr; /* error address reg */
+       u8      algo;
+       u8      rsv[3];
+};
+
+struct bfa_diag_loopback_result_s {
+	u32	numtxmfrm;	/* no. of transmitted frames */
+	u32	numosffrm;	/* no. of outstanding frames */
+	u32	numrcvfrm;	/* no. of received good frames */
+	u32	badfrminf;	/* mismatch info */
+	u32	badfrmnum;	/* mismatched frame number */
+       u8      status;         /* loopback test result */
+       u8      rsvd[3];
+};
+
+struct bfa_diag_ledtest_s {
+       u32     cmd;    /* bfa_led_op_t */
+       u32     color;  /* bfa_led_color_t */
+       u16     freq;   /* no. of blinks every 10 secs */
+       u8      led;    /* bitmap of LEDs to be tested */
+       u8      rsvd[5];
+};
+
+struct bfa_diag_loopback_s {
+       u32     loopcnt;
+       u32     pattern;
+       u8      lb_mode;    /* bfa_port_opmode_t */
+       u8      speed;      /* bfa_port_speed_t */
+       u8      rsvd[2];
+};
+
+/*
+ *     PHY module specific
+ */
+enum bfa_phy_status_e {
+       BFA_PHY_STATUS_GOOD     = 0, /* phy is good */
+       BFA_PHY_STATUS_NOT_PRESENT      = 1, /* phy does not exist */
+       BFA_PHY_STATUS_BAD      = 2, /* phy is bad */
+};
+
+/*
+ * phy attributes for phy query
+ */
+struct bfa_phy_attr_s {
+       u32     status;         /* phy present/absent status */
+       u32     length;         /* firmware length */
+       u32     fw_ver;         /* firmware version */
+       u32     an_status;      /* AN status */
+       u32     pma_pmd_status; /* PMA/PMD link status */
+       u32     pma_pmd_signal; /* PMA/PMD signal detect */
+       u32     pcs_status;     /* PCS link status */
+};
+
+/*
+ * phy stats
+ */
+struct bfa_phy_stats_s {
+       u32     status;         /* phy stats status */
+       u32     link_breaks;    /* Num of link breaks after linkup */
+	u32	pma_pmd_fault;	/* PMA/PMD fault */
+       u32     pcs_fault;      /* PCS fault */
+       u32     speed_neg;      /* Num of speed negotiation */
+       u32     tx_eq_training; /* Num of TX EQ training */
+       u32     tx_eq_timeout;  /* Num of TX EQ timeout */
+       u32     crc_error;      /* Num of CRC errors */
+};
+
+#pragma pack()
+
 #endif /* __BFA_DEFS_H__ */
index 191d34a58b9cf15e5167b419832fc287b0201779..3bbc583f65cfeee82a9c2e6aace0b4a7490a025f 100644 (file)
@@ -90,12 +90,14 @@ enum bfa_lport_role {
  * FCS port configuration.
  */
 struct bfa_lport_cfg_s {
-    wwn_t             pwwn;       /*  port wwn */
-    wwn_t             nwwn;       /*  node wwn */
-    struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
-    bfa_boolean_t       preboot_vp;  /*  vport created from PBC */
-    enum bfa_lport_role     roles;      /*  FCS port roles */
-    u8      tag[16];   /*  opaque tag from application */
+       wwn_t          pwwn;       /*  port wwn */
+       wwn_t          nwwn;       /*  node wwn */
+       struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
+       enum bfa_lport_role roles;      /* FCS port roles */
+       u32     rsvd;
+       bfa_boolean_t   preboot_vp;  /*  vport created from PBC */
+       u8      tag[16];        /* opaque tag from application */
+       u8      padding[4];
 };
 
 /*
@@ -249,12 +251,13 @@ enum bfa_vport_state {
        BFA_FCS_VPORT_FDISC_SEND        = 2,
        BFA_FCS_VPORT_FDISC             = 3,
        BFA_FCS_VPORT_FDISC_RETRY       = 4,
-       BFA_FCS_VPORT_ONLINE            = 5,
-       BFA_FCS_VPORT_DELETING          = 6,
-       BFA_FCS_VPORT_CLEANUP           = 6,
-       BFA_FCS_VPORT_LOGO_SEND         = 7,
-       BFA_FCS_VPORT_LOGO              = 8,
-       BFA_FCS_VPORT_ERROR             = 9,
+       BFA_FCS_VPORT_FDISC_RSP_WAIT    = 5,
+       BFA_FCS_VPORT_ONLINE            = 6,
+       BFA_FCS_VPORT_DELETING          = 7,
+       BFA_FCS_VPORT_CLEANUP           = 8,
+       BFA_FCS_VPORT_LOGO_SEND         = 9,
+       BFA_FCS_VPORT_LOGO              = 10,
+       BFA_FCS_VPORT_ERROR             = 11,
        BFA_FCS_VPORT_MAX_STATE,
 };
 
index 207f598877c74d3d1532b182a5392b8d22313c66..863c6ba7d5eb0c817bfa7f3975a9d3de981cc8be 100644 (file)
@@ -47,13 +47,12 @@ struct bfa_iocfc_fwcfg_s {
        u16        num_rports;  /*  number of remote ports      */
        u16        num_ioim_reqs;       /*  number of IO reqs           */
        u16        num_tskim_reqs;      /*  task management requests    */
-       u16        num_iotm_reqs;       /*  number of TM IO reqs        */
-       u16        num_tsktm_reqs;      /*  TM task management requests*/
+       u16        num_fwtio_reqs;      /* number of TM IO reqs in FW */
        u16        num_fcxp_reqs;       /*  unassisted FC exchanges     */
        u16        num_uf_bufs; /*  unsolicited recv buffers    */
        u8              num_cqs;
        u8              fw_tick_res;    /*  FW clock resolution in ms */
-       u8              rsvd[4];
+       u8              rsvd[2];
 };
 #pragma pack()
 
@@ -66,8 +65,12 @@ struct bfa_iocfc_drvcfg_s {
        u16         ioc_recover;        /*  IOC recovery mode             */
        u16         min_cfg;    /*  minimum configuration         */
        u16        path_tov;    /*  device path timeout   */
+       u16             num_tio_reqs;   /*!< number of TM IO reqs       */
+       u8              port_mode;
+       u8              rsvd_a;
        bfa_boolean_t   delay_comp; /*  delay completion of
                                                        failed inflight IOs */
+       u16             num_ttsk_reqs;   /* TM task management requests */
        u32             rsvd;
 };
 
@@ -82,7 +85,7 @@ struct bfa_iocfc_cfg_s {
 /*
  * IOC firmware IO stats
  */
-struct bfa_fw_io_stats_s {
+struct bfa_fw_ioim_stats_s {
        u32     host_abort;             /*  IO aborted by host driver*/
        u32     host_cleanup;           /*  IO clean up by host driver */
 
@@ -152,6 +155,54 @@ struct bfa_fw_io_stats_s {
                                                 */
 };
 
+struct bfa_fw_tio_stats_s {
+       u32     tio_conf_proc;  /* TIO CONF processed */
+       u32     tio_conf_drop;      /* TIO CONF dropped */
+       u32     tio_cleanup_req;    /* TIO cleanup requested */
+       u32     tio_cleanup_comp;   /* TIO cleanup completed */
+       u32     tio_abort_rsp;      /* TIO abort response */
+       u32     tio_abort_rsp_comp; /* TIO abort rsp completed */
+       u32     tio_abts_req;       /* TIO ABTS requested */
+       u32     tio_abts_ack;       /* TIO ABTS ack-ed */
+       u32     tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
+       u32     tio_abts_tmo;       /* TIO ABTS timeout */
+       u32     tio_snsdata_dma;    /* TIO sense data DMA */
+       u32     tio_rxwchan_wait; /* TIO waiting for RX wait channel */
+       u32     tio_rxwchan_avail; /* TIO RX wait channel available */
+       u32     tio_hit_bls;        /* TIO IOH BLS event */
+       u32     tio_uf_recv;        /* TIO received UF */
+	u32	tio_rd_invalid_sm; /* TIO read request in wrong state machine */
+	u32	tio_wr_invalid_sm; /* TIO write request in wrong state machine */
+
+       u32     ds_rxwchan_wait; /* DS waiting for RX wait channel */
+       u32     ds_rxwchan_avail; /* DS RX wait channel available */
+       u32     ds_unaligned_rd;    /* DS unaligned read */
+       u32     ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
+       u32     ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
+       u32     ds_flush_req;       /* DS flush requested */
+       u32     ds_flush_comp;      /* DS flush completed */
+       u32     ds_xfrdy_exp;       /* DS XFER_RDY expired */
+       u32     ds_seq_cnt_err;     /* DS seq cnt error */
+       u32     ds_seq_len_err;     /* DS seq len error */
+       u32     ds_data_oor;        /* DS data out of order */
+       u32     ds_hit_bls;     /* DS hit BLS */
+       u32     ds_edtov_timer_exp; /* DS edtov expired */
+       u32     ds_cpu_owned;       /* DS cpu owned */
+       u32     ds_hit_class2;      /* DS hit class2 */
+       u32     ds_length_err;      /* DS length error */
+       u32     ds_ro_ooo_err;      /* DS relative offset out-of-order error */
+       u32     ds_rectov_timer_exp;    /* DS rectov expired */
+       u32     ds_unexp_fr_err;    /* DS unexp frame error */
+};
+
+/*
+ * IOC firmware IO stats
+ */
+struct bfa_fw_io_stats_s {
+       struct bfa_fw_ioim_stats_s      ioim_stats;
+       struct bfa_fw_tio_stats_s       tio_stats;
+};
+
 /*
  * IOC port firmware stats
  */
@@ -205,6 +256,7 @@ struct bfa_fw_port_lksm_stats_s {
     u32    nos_tx;             /*  No. of times NOS tx started         */
     u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
     u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM      */
+       u32     bbsc_lr;        /* LKSM LR tx for credit recovery       */
 };
 
 struct bfa_fw_port_snsm_stats_s {
@@ -216,6 +268,7 @@ struct bfa_fw_port_snsm_stats_s {
     u32    error_resets;       /*  error resets initiated by upsm      */
     u32    sync_lost;          /*  Sync loss count                     */
     u32    sig_lost;           /*  Signal loss count                   */
+       u32     asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
 };
 
 struct bfa_fw_port_physm_stats_s {
@@ -266,8 +319,8 @@ struct bfa_fw_fcoe_stats_s {
  * IOC firmware FCoE port stats
  */
 struct bfa_fw_fcoe_port_stats_s {
-    struct bfa_fw_fcoe_stats_s  fcoe_stats;
-    struct bfa_fw_fip_stats_s   fip_stats;
+       struct bfa_fw_fcoe_stats_s  fcoe_stats;
+       struct bfa_fw_fip_stats_s   fip_stats;
 };
 
 /*
@@ -416,6 +469,7 @@ struct bfa_fw_stats_s {
  * QoS states
  */
 enum bfa_qos_state {
+       BFA_QOS_DISABLED = 0,           /* QoS is disabled */
        BFA_QOS_ONLINE = 1,             /*  QoS is online */
        BFA_QOS_OFFLINE = 2,            /*  QoS is offline */
 };
@@ -618,6 +672,12 @@ struct bfa_itnim_iostats_s {
        u32     tm_iocdowns;            /*  TM cleaned-up due to IOC down   */
        u32     tm_cleanups;            /*  TM cleanup requests */
        u32     tm_cleanup_comps;       /*  TM cleanup completions      */
+       u32     lm_lun_across_sg;       /*  LM lun is across sg data buf */
+       u32     lm_lun_not_sup;         /*  LM lun not supported */
+       u32     lm_rpl_data_changed;    /*  LM report-lun data changed */
+       u32     lm_wire_residue_changed; /* LM report-lun rsp residue changed */
+       u32     lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
+       u32     lm_lun_not_rdy;         /* LM lun not ready */
 };
 
 /* Modify char* port_stt[] in bfal_port.c if a new state was added */
@@ -636,6 +696,7 @@ enum bfa_port_states {
        BFA_PORT_ST_FWMISMATCH          = 12,
        BFA_PORT_ST_PREBOOT_DISABLED    = 13,
        BFA_PORT_ST_TOGGLING_QWAIT      = 14,
+       BFA_PORT_ST_ACQ_ADDR            = 15,
        BFA_PORT_ST_MAX_STATE,
 };
 
@@ -732,7 +793,50 @@ enum bfa_port_linkstate_rsn {
        CEE_ISCSI_PRI_PFC_OFF                   = 42,
        CEE_ISCSI_PRI_OVERLAP_FCOE_PRI          = 43
 };
+
+#define MAX_LUN_MASK_CFG 16
+
+/*
+ * Initially flash content may be fff. On making LUN mask enable and disable
+ * state chnage.  when report lun command is being processed it goes from
+ * BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
+ * BFA_LUN_MASK_ACTIVE.
+ */
+enum bfa_ioim_lun_mask_state_s {
+       BFA_IOIM_LUN_MASK_INACTIVE = 0,
+       BFA_IOIM_LUN_MASK_ACTIVE = 1,
+       BFA_IOIM_LUN_MASK_FETCHED = 2,
+};
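
Expressed as a tiny transition helper, the REPORT LUNS round trip described in
the comment above might look like this (purely illustrative; the real driver
drives these states from its REPORT LUNS handling):

	static enum bfa_ioim_lun_mask_state_s
	example_lun_mask_transition(enum bfa_ioim_lun_mask_state_s cur,
				    bfa_boolean_t rpl_done)
	{
		switch (cur) {
		case BFA_IOIM_LUN_MASK_ACTIVE:
			return BFA_IOIM_LUN_MASK_FETCHED; /* REPORT LUNS issued */
		case BFA_IOIM_LUN_MASK_FETCHED:
			return rpl_done ? BFA_IOIM_LUN_MASK_ACTIVE : cur;
		default:
			return BFA_IOIM_LUN_MASK_INACTIVE;
		}
	}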
+
+enum bfa_lunmask_state_s {
+       BFA_LUNMASK_DISABLED = 0x00,
+       BFA_LUNMASK_ENABLED = 0x01,
+       BFA_LUNMASK_MINCFG = 0x02,
+       BFA_LUNMASK_UNINITIALIZED = 0xff,
+};
+
 #pragma pack(1)
+/*
+ * LUN mask configuration
+ */
+struct bfa_lun_mask_s {
+       wwn_t           lp_wwn;
+       wwn_t           rp_wwn;
+       struct scsi_lun lun;
+       u8              ua;
+       u8              rsvd[3];
+       u16             rp_tag;
+       u8              lp_tag;
+       u8              state;
+};
+
+#define MAX_LUN_MASK_CFG 16
+struct bfa_lunmask_cfg_s {
+       u32     status;
+       u32     rsvd;
+       struct bfa_lun_mask_s   lun_list[MAX_LUN_MASK_CFG];
+};
+
 /*
  *      Physical port configuration
  */
@@ -748,6 +852,10 @@ struct bfa_port_cfg_s {
        u8       tx_bbcredit;   /*  transmit buffer credits     */
        u8       ratelimit;     /*  ratelimit enabled or not    */
        u8       trl_def_speed; /*  ratelimit default speed     */
+       u8      bb_scn;         /*  BB_SCN value from FLOGI Exchg */
+       u8      bb_scn_state;   /*  Config state of BB_SCN */
+       u8      faa_state;      /*  FAA enabled/disabled        */
+       u8      rsvd[1];
        u16 path_tov;   /*  device path timeout */
        u16 q_depth;    /*  SCSI Queue depth            */
 };
@@ -783,7 +891,7 @@ struct bfa_port_attr_s {
        enum bfa_port_topology  topology;       /*  current topology */
        bfa_boolean_t           beacon;         /*  current beacon status */
        bfa_boolean_t           link_e2e_beacon; /*  link beacon is on */
-       bfa_boolean_t           plog_enabled;   /*  portlog is enabled */
+       bfa_boolean_t   bbsc_op_status; /* fc credit recovery oper state */
 
        /*
         * Dynamic field - info from FCS
@@ -792,12 +900,10 @@ struct bfa_port_attr_s {
        enum bfa_port_type      port_type;      /*  current topology */
        u32             loopback;       /*  external loopback */
        u32             authfail;       /*  auth fail state */
-       bfa_boolean_t           io_profile;     /*  get it from fcpim mod */
-       u8                      pad[4];         /*  for 64-bit alignement */
 
        /* FCoE specific  */
        u16             fcoe_vlan;
-       u8                      rsvd1[6];
+       u8                      rsvd1[2];
 };
 
 /*
@@ -987,6 +1093,19 @@ struct bfa_itnim_ioprofile_s {
        struct bfa_itnim_latency_s io_latency;
 };
 
+/*
+ *     vHBA port attribute values.
+ */
+struct bfa_vhba_attr_s {
+       wwn_t   nwwn;       /* node wwn */
+       wwn_t   pwwn;       /* port wwn */
+       u32     pid;        /* port ID */
+       bfa_boolean_t       io_profile; /* get it from fcpim mod */
+       bfa_boolean_t       plog_enabled;   /* portlog is enabled */
+       u16     path_tov;
+       u8      rsvd[2];
+};
+
 /*
  * FC physical port statistics.
  */
@@ -1020,6 +1139,9 @@ struct bfa_port_fc_stats_s {
        u64     bad_os_count;   /*  Invalid ordered sets        */
        u64     err_enc_out;    /*  Encoding err nonframe_8b10b */
        u64     err_enc;        /*  Encoding err frame_8b10b    */
+       u64     bbsc_frames_lost; /* Credit Recovery-Frames Lost  */
+       u64     bbsc_credits_lost; /* Credit Recovery-Credits Lost */
+       u64     bbsc_link_resets; /* Credit Recovery-Link Resets   */
 };
 
 /*
@@ -1078,4 +1200,131 @@ union bfa_port_stats_u {
        struct bfa_port_eth_stats_s     eth;
 };
 
+struct bfa_port_cfg_mode_s {
+       u16             max_pf;
+       u16             max_vf;
+       enum bfa_mode_s mode;
+};
+
+#pragma pack(1)
+
+#define BFA_CEE_LLDP_MAX_STRING_LEN    (128)
+#define BFA_CEE_DCBX_MAX_PRIORITY      (8)
+#define BFA_CEE_DCBX_MAX_PGID          (8)
+
+struct bfa_cee_lldp_str_s {
+       u8      sub_type;
+       u8      len;
+       u8      rsvd[2];
+       u8      value[BFA_CEE_LLDP_MAX_STRING_LEN];
+};
+
+struct bfa_cee_lldp_cfg_s {
+       struct bfa_cee_lldp_str_s chassis_id;
+       struct bfa_cee_lldp_str_s port_id;
+       struct bfa_cee_lldp_str_s port_desc;
+       struct bfa_cee_lldp_str_s sys_name;
+       struct bfa_cee_lldp_str_s sys_desc;
+       struct bfa_cee_lldp_str_s mgmt_addr;
+       u16     time_to_live;
+       u16     enabled_system_cap;
+};
+
+/* CEE/DCBX parameters */
+struct bfa_cee_dcbx_cfg_s {
+       u8      pgid[BFA_CEE_DCBX_MAX_PRIORITY];
+       u8      pg_percentage[BFA_CEE_DCBX_MAX_PGID];
+	u8	pfc_primap; /* bitmap of priorities with PFC enabled */
+	u8	fcoe_primap; /* bitmap of priorities used for FCoE traffic */
+	u8	iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
+	u8	dcbx_version; /* operating version: CEE or preCEE */
+       u8      lls_fcoe; /* FCoE Logical Link Status */
+       u8      lls_lan; /* LAN Logical Link Status */
+       u8      rsvd[2];
+};
+
+/* CEE Query */
+struct bfa_cee_attr_s {
+       u8      cee_status;
+       u8      error_reason;
+       struct bfa_cee_lldp_cfg_s lldp_remote;
+       struct bfa_cee_dcbx_cfg_s dcbx_remote;
+       mac_t src_mac;
+       u8      link_speed;
+       u8      nw_priority;
+       u8      filler[2];
+};
+
+/* LLDP/DCBX/CEE Statistics */
+struct bfa_cee_stats_s {
+       u32             lldp_tx_frames;         /* LLDP Tx Frames */
+       u32             lldp_rx_frames;         /* LLDP Rx Frames */
+       u32             lldp_rx_frames_invalid; /* LLDP Rx Frames invalid */
+       u32             lldp_rx_frames_new;     /* LLDP Rx Frames new */
+       u32             lldp_tlvs_unrecognized; /* LLDP Rx unrecog. TLVs */
+       u32             lldp_rx_shutdown_tlvs;  /* LLDP Rx shutdown TLVs */
+       u32             lldp_info_aged_out;     /* LLDP remote info aged */
+       u32             dcbx_phylink_ups;       /* DCBX phy link ups */
+       u32             dcbx_phylink_downs;     /* DCBX phy link downs */
+       u32             dcbx_rx_tlvs;           /* DCBX Rx TLVs */
+       u32             dcbx_rx_tlvs_invalid;   /* DCBX Rx TLVs invalid */
+       u32             dcbx_control_tlv_error; /* DCBX control TLV errors */
+       u32             dcbx_feature_tlv_error; /* DCBX feature TLV errors */
+       u32             dcbx_cee_cfg_new;       /* DCBX new CEE cfg rcvd */
+       u32             cee_status_down;        /* DCB status down */
+       u32             cee_status_up;          /* DCB status up */
+       u32             cee_hw_cfg_changed;     /* DCB hw cfg changed */
+       u32             cee_rx_invalid_cfg;     /* DCB invalid cfg */
+};
+
+#pragma pack()
+
+/*
+ *                     AEN related definitions
+ */
+#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
+                          | BFA_PCI_VENDOR_ID_BROCADE)
+
+/* BFA remote port events */
+enum bfa_rport_aen_event {
+       BFA_RPORT_AEN_ONLINE     = 1,   /* RPort online event */
+       BFA_RPORT_AEN_OFFLINE    = 2,   /* RPort offline event */
+       BFA_RPORT_AEN_DISCONNECT = 3,   /* RPort disconnect event */
+       BFA_RPORT_AEN_QOS_PRIO   = 4,   /* QOS priority change event */
+       BFA_RPORT_AEN_QOS_FLOWID = 5,   /* QOS flow Id change event */
+};
+
+struct bfa_rport_aen_data_s {
+       u16             vf_id;  /* vf_id of this logical port */
+       u16             rsvd[3];
+       wwn_t           ppwwn;  /* WWN of its physical port */
+       wwn_t           lpwwn;  /* WWN of this logical port */
+       wwn_t           rpwwn;  /* WWN of this remote port */
+       union {
+               struct bfa_rport_qos_attr_s qos;
+       } priv;
+};
+
+union bfa_aen_data_u {
+       struct bfa_adapter_aen_data_s   adapter;
+       struct bfa_port_aen_data_s      port;
+       struct bfa_lport_aen_data_s     lport;
+       struct bfa_rport_aen_data_s     rport;
+       struct bfa_itnim_aen_data_s     itnim;
+       struct bfa_audit_aen_data_s     audit;
+       struct bfa_ioc_aen_data_s       ioc;
+};
+
+#define BFA_AEN_MAX_ENTRY      512
+
+struct bfa_aen_entry_s {
+       struct list_head        qe;
+       enum bfa_aen_category   aen_category;
+       u32                     aen_type;
+       union bfa_aen_data_u    aen_data;
+       struct timeval          aen_tv;
+       u32                     seq_num;
+       u32                     bfad_num;
+};
+
 #endif /* __BFA_DEFS_SVC_H__ */
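
The AEN definitions added above give each adapter instance a bounded pool of BFA_AEN_MAX_ENTRY (512) event entries, each carrying a category, an event type, a per-category payload, a timestamp and a running sequence number. The standalone sketch below is illustrative only (simplified types, a hypothetical pool and helper, a userspace time source); it shows the intended fill-and-sequence pattern for a remote-port online event.

#include <stdio.h>
#include <string.h>
#include <time.h>

#define AEN_MAX_ENTRY   512                     /* mirrors BFA_AEN_MAX_ENTRY */

struct aen_entry {                              /* simplified bfa_aen_entry_s */
        int                category;            /* e.g. the rport category */
        unsigned int       type;                /* e.g. BFA_RPORT_AEN_ONLINE (1) */
        unsigned long long rpwwn;               /* payload: remote port WWN */
        time_t             tv;                  /* timestamp */
        unsigned int       seq_num;             /* running sequence number */
};

static struct aen_entry pool[AEN_MAX_ENTRY];    /* hypothetical per-instance pool */
static unsigned int seq;

static struct aen_entry *aen_post_rport_online(unsigned long long rpwwn)
{
        struct aen_entry *e = &pool[seq % AEN_MAX_ENTRY];

        memset(e, 0, sizeof(*e));
        e->category = 4;                        /* stand-in for the rport category */
        e->type     = 1;                        /* BFA_RPORT_AEN_ONLINE */
        e->rpwwn    = rpwwn;
        e->tv       = time(NULL);
        e->seq_num  = ++seq;
        return e;
}

int main(void)
{
        struct aen_entry *e = aen_post_rport_online(0x200000051e123456ULL);
        printf("seq=%u type=%u rpwwn=%llx\n", e->seq_num, e->type, e->rpwwn);
        return 0;
}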
index bf0067e0fd0da5592401e57e36fcaf5e7ca1b72f..50b6a1c86195ac6d6c394295fcdd8150dbd43852 100644 (file)
@@ -56,6 +56,161 @@ struct scsi_cdb_s {
 
 #define SCSI_MAX_ALLOC_LEN      0xFF    /* maximum allocation length */
 
+#define SCSI_SENSE_CUR_ERR     0x70
+#define SCSI_SENSE_DEF_ERR     0x71
+
+/*
+ * SCSI additional sense codes
+ */
+#define SCSI_ASC_LUN_NOT_READY         0x04
+#define SCSI_ASC_LUN_NOT_SUPPORTED     0x25
+#define SCSI_ASC_TOCC                  0x3F
+
+/*
+ * SCSI additional sense code qualifiers
+ */
+#define SCSI_ASCQ_MAN_INTR_REQ         0x03    /* manual intervention req */
+#define SCSI_ASCQ_RL_DATA_CHANGED      0x0E    /* report luns data changed */
+
+/*
+ * Methods of reporting informational exceptions
+ */
+#define SCSI_MP_IEC_UNIT_ATTN          0x2     /* generate unit attention */
+
+struct scsi_report_luns_data_s {
+       u32             lun_list_length;        /* length of LUN list, in bytes */
+       u32             reserved;
+       struct scsi_lun lun[1];                 /* first LUN in lun list */
+};
+
+struct scsi_inquiry_vendor_s {
+       u8      vendor_id[8];
+};
+
+struct scsi_inquiry_prodid_s {
+       u8      product_id[16];
+};
+
+struct scsi_inquiry_prodrev_s {
+       u8      product_rev[4];
+};
+
+struct scsi_inquiry_data_s {
+#ifdef __BIG_ENDIAN
+       u8              peripheral_qual:3;      /* peripheral qualifier */
+       u8              device_type:5;          /* peripheral device type */
+       u8              rmb:1;                  /* removable medium bit */
+       u8              device_type_mod:7;      /* device type modifier */
+       u8              version;
+       u8              aenc:1;         /* async evt notification capability */
+       u8              trm_iop:1;      /* terminate I/O process */
+       u8              norm_aca:1;     /* normal ACA supported */
+       u8              hi_support:1;   /* SCSI-3: supports REPORT LUNS */
+       u8              rsp_data_format:4;
+       u8              additional_len;
+       u8              sccs:1;
+       u8              reserved1:7;
+       u8              reserved2:1;
+       u8              enc_serv:1;     /* enclosure service component */
+       u8              reserved3:1;
+       u8              multi_port:1;   /* multi-port device */
+       u8              m_chngr:1;      /* device in medium transport element */
+       u8              ack_req_q:1;    /* SIP specific bit */
+       u8              addr32:1;       /* SIP specific bit */
+       u8              addr16:1;       /* SIP specific bit */
+       u8              rel_adr:1;      /* relative address */
+       u8              w_bus32:1;
+       u8              w_bus16:1;
+       u8              synchronous:1;
+       u8              linked_commands:1;
+       u8              trans_dis:1;
+       u8              cmd_queue:1;    /* command queueing supported */
+       u8              soft_reset:1;   /* soft reset alternative (VS) */
+#else
+       u8              device_type:5;  /* peripheral device type */
+       u8              peripheral_qual:3; /* peripheral qualifier */
+       u8              device_type_mod:7; /* device type modifier */
+       u8              rmb:1;          /* removable medium bit */
+       u8              version;
+       u8              rsp_data_format:4;
+       u8              hi_support:1;   /* SCSI-3: supports REPORT LUNS */
+       u8              norm_aca:1;     /* normal ACA supported */
+       u8              terminate_iop:1;/* terminate I/O process */
+       u8              aenc:1;         /* async evt notification capability */
+       u8              additional_len;
+       u8              reserved1:7;
+       u8              sccs:1;
+       u8              addr16:1;       /* SIP specific bit */
+       u8              addr32:1;       /* SIP specific bit */
+       u8              ack_req_q:1;    /* SIP specific bit */
+       u8              m_chngr:1;      /* device in medium transport element */
+       u8              multi_port:1;   /* multi-port device */
+       u8              reserved3:1;    /* TBD - Vendor Specific */
+       u8              enc_serv:1;     /* enclosure service component */
+       u8              reserved2:1;
+       u8              soft_reset:1;   /* soft reset alternative (VS) */
+       u8              cmd_queue:1;    /* command queueing supported */
+       u8              trans_dis:1;
+       u8              linked_commands:1;
+       u8              synchronous:1;
+       u8              w_bus16:1;
+       u8              w_bus32:1;
+       u8              rel_adr:1;      /* relative address */
+#endif
+       struct scsi_inquiry_vendor_s    vendor_id;
+       struct scsi_inquiry_prodid_s    product_id;
+       struct scsi_inquiry_prodrev_s   product_rev;
+       u8              vendor_specific[20];
+       u8              reserved4[40];
+};
+
+/*
+ *     SCSI sense data format
+ */
+struct scsi_sense_s {
+#ifdef __BIG_ENDIAN
+       u8              valid:1;
+       u8              rsp_code:7;
+#else
+       u8              rsp_code:7;
+       u8              valid:1;
+#endif
+       u8              seg_num;
+#ifdef __BIG_ENDIAN
+       u8              file_mark:1;
+       u8              eom:1;          /* end of media */
+       u8              ili:1;          /* incorrect length indicator */
+       u8              reserved:1;
+       u8              sense_key:4;
+#else
+       u8              sense_key:4;
+       u8              reserved:1;
+       u8              ili:1;          /* incorrect length indicator */
+       u8              eom:1;          /* end of media */
+       u8              file_mark:1;
+#endif
+       u8              information[4]; /* device-type or cmd specific info */
+       u8              add_sense_length; /* additional sense length */
+       u8              command_info[4];/* command specific information */
+       u8              asc;            /* additional sense code */
+       u8              ascq;           /* additional sense code qualifier */
+       u8              fru_code;       /* field replaceable unit code */
+#ifdef __BIG_ENDIAN
+       u8              sksv:1;         /* sense key specific valid */
+       u8              c_d:1;          /* command/data bit */
+       u8              res1:2;
+       u8              bpv:1;          /* bit pointer valid */
+       u8              bpointer:3;     /* bit pointer */
+#else
+       u8              bpointer:3;     /* bit pointer */
+       u8              bpv:1;          /* bit pointer valid */
+       u8              res1:2;
+       u8              c_d:1;          /* command/data bit */
+       u8              sksv:1;         /* sense key specific valid */
+#endif
+       u8              fpointer[2];    /* field pointer */
+};
+
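+
The fixed-format sense layout above is what the new LUN-masking callbacks in bfa_fcpim.c fill when they fake a CHECK CONDITION locally. A minimal standalone sketch, assuming the standard fixed-format byte offsets (byte 0 response code, byte 2 sense key, byte 7 additional length, bytes 12/13 ASC/ASCQ); the constant names simply mirror the defines added above and are not driver code.

#include <stdio.h>
#include <string.h>

#define SENSE_CUR_ERR          0x70    /* SCSI_SENSE_CUR_ERR */
#define SENSE_KEY_ILLEGAL_REQ  0x05    /* ILLEGAL_REQUEST */
#define ASC_LUN_NOT_SUPPORTED  0x25    /* SCSI_ASC_LUN_NOT_SUPPORTED */

int main(void)
{
        unsigned char sense[18];

        memset(sense, 0, sizeof(sense));
        sense[0]  = SENSE_CUR_ERR;             /* rsp_code: current error */
        sense[2]  = SENSE_KEY_ILLEGAL_REQ;     /* sense_key */
        sense[7]  = 0x0a;                      /* add_sense_length */
        sense[12] = ASC_LUN_NOT_SUPPORTED;     /* asc */
        sense[13] = 0x00;                      /* ascq */

        printf("key=%x asc=%02x ascq=%02x\n",
               sense[2] & 0xF, sense[12], sense[13]);
        return 0;
}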
 /*
  * Fibre Channel Header Structure (FCHS) definition
  */
@@ -1021,7 +1176,7 @@ struct fc_symname_s {
 #define FC_ED_TOV      2
 #define FC_REC_TOV     (FC_ED_TOV + 1)
 #define FC_RA_TOV      10
-#define FC_ELS_TOV     (2 * FC_RA_TOV)
+#define FC_ELS_TOV     ((2 * FC_RA_TOV) + 1)
 #define FC_FCCT_TOV    (3 * FC_RA_TOV)
 
 /*
@@ -1048,15 +1203,6 @@ struct fc_vft_s {
        u32        res_c:24;
 };
 
-/*
- * FCP
- */
-enum {
-       FCP_RJT         = 0x01000000,   /* SRR reject */
-       FCP_SRR_ACCEPT  = 0x02000000,   /* SRR accept */
-       FCP_SRR         = 0x14000000,   /* Sequence Retransmission Request */
-};
-
 /*
  * FCP_CMND definitions
  */
index b7e253451654774f89c48cd6d428a557690a222f..17b59b8b564425fc0a8bc38dcf25f2af08f780a9 100644 (file)
@@ -94,7 +94,6 @@ fcbuild_init(void)
         */
        plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
        plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
-       plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004);
        plogi_tmpl.csp.ciro = 0x1;
        plogi_tmpl.csp.cisc = 0x0;
        plogi_tmpl.csp.altbbcred = 0x0;
@@ -156,6 +155,22 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
         */
 }
 
+static void
+fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+{
+       memset(fchs, 0, sizeof(struct fchs_s));
+
+       fchs->routing = FC_RTG_FC4_DEV_DATA;
+       fchs->cat_info = FC_CAT_SOLICIT_CTRL;
+       fchs->type = FC_TYPE_SERVICES;
+       fchs->f_ctl =
+               bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+                          FCTL_END_SEQ | FCTL_SI_XFER);
+       fchs->d_id = d_id;
+       fchs->s_id = s_id;
+       fchs->ox_id = ox_id;
+}
+
 void
 fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
@@ -207,7 +222,7 @@ fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 static          u16
 fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
                 __be16 ox_id, wwn_t port_name, wwn_t node_name,
-                u16 pdu_size, u8 els_code)
+                u16 pdu_size, u16 bb_cr, u8 els_code)
 {
        struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
 
@@ -220,6 +235,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
                fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
        plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
+       plogi->csp.bbcred  = cpu_to_be16(bb_cr);
 
        memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
        memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
@@ -268,15 +284,17 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 u16
 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
                   __be16 ox_id, wwn_t port_name, wwn_t node_name,
-                  u16 pdu_size, u16 local_bb_credits)
+                  u16 pdu_size, u16 local_bb_credits, u8 bb_scn)
 {
        u32        d_id = 0;
+       u16        bbscn_rxsz = (bb_scn << 12) | pdu_size;
 
        memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
        flogi->els_cmd.els_code = FC_ELS_ACC;
-       flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
+       flogi->class3.rxsz = cpu_to_be16(pdu_size);
+       flogi->csp.rxsz  = cpu_to_be16(bbscn_rxsz);     /* bb_scn/rxsz */
        flogi->port_name = port_name;
        flogi->node_name = node_name;
 
@@ -306,19 +324,19 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 u16
 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
               u16 ox_id, wwn_t port_name, wwn_t node_name,
-              u16 pdu_size)
+              u16 pdu_size, u16 bb_cr)
 {
        return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
-                               node_name, pdu_size, FC_ELS_PLOGI);
+                               node_name, pdu_size, bb_cr, FC_ELS_PLOGI);
 }
 
 u16
 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
                   u16 ox_id, wwn_t port_name, wwn_t node_name,
-                  u16 pdu_size)
+                  u16 pdu_size, u16 bb_cr)
 {
        return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
-                               node_name, pdu_size, FC_ELS_ACC);
+                               node_name, pdu_size, bb_cr, FC_ELS_ACC);
 }
 
 enum fc_parse_status
@@ -1095,6 +1113,21 @@ fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
        return FC_PARSE_OK;
 }
 
+u16
+fc_gs_rjt_build(struct fchs_s *fchs,  struct ct_hdr_s *cthdr,
+               u32 d_id, u32 s_id, u16 ox_id, u8 reason_code,
+               u8 reason_code_expl)
+{
+       fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id);
+
+       cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT);
+       cthdr->rev_id = CT_GS3_REVISION;
+
+       cthdr->reason_code = reason_code;
+       cthdr->exp_code    = reason_code_expl;
+       return sizeof(struct ct_hdr_s);
+}
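
A hedged sketch of how a GS/CT handler could use the new fc_gs_rjt_build() to bounce an unsupported request back to its originator. Only the signature above is taken from the patch; the wrapper name and the CT_RSN_NOT_SUPP reason-code constant are assumptions.

static u16
example_reject_ct_request(struct fchs_s *rx_fchs, struct fchs_s *tx_fchs,
                          struct ct_hdr_s *tx_cthdr, u32 my_pid)
{
        /* reply to the sender, echoing its originator exchange id */
        return fc_gs_rjt_build(tx_fchs, tx_cthdr,
                               rx_fchs->s_id, my_pid, rx_fchs->ox_id,
                               CT_RSN_NOT_SUPP,   /* assumed reason code */
                               0);                /* no explanation code */
}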
+
 u16
 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
                u8 set_br_reg, u32 s_id, u16 ox_id)
index ece51ec7620b154819a342b8fc54aa89e1242bca..42cd9d4da697b767eb66601644b155b4d2162bcd 100644 (file)
@@ -66,6 +66,9 @@ fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
        case RPSC_OP_SPEED_8G:
                return BFA_PORT_SPEED_8GBPS;
 
+       case RPSC_OP_SPEED_16G:
+               return BFA_PORT_SPEED_16GBPS;
+
        case RPSC_OP_SPEED_10G:
                return BFA_PORT_SPEED_10GBPS;
 
@@ -94,6 +97,9 @@ fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
        case BFA_PORT_SPEED_8GBPS:
                return RPSC_OP_SPEED_8G;
 
+       case BFA_PORT_SPEED_16GBPS:
+               return RPSC_OP_SPEED_16G;
+
        case BFA_PORT_SPEED_10GBPS:
                return RPSC_OP_SPEED_10G;
 
@@ -141,11 +147,11 @@ u16        fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
                                   u32 s_id, __be16 ox_id,
                                   wwn_t port_name, wwn_t node_name,
                                   u16 pdu_size,
-                                  u16 local_bb_credits);
+                                  u16 local_bb_credits, u8 bb_scn);
 
 u16        fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
                               u32 s_id, u16 ox_id, wwn_t port_name,
-                              wwn_t node_name, u16 pdu_size);
+                              wwn_t node_name, u16 pdu_size, u16 bb_cr);
 
 enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
 
@@ -177,13 +183,17 @@ u16        fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 u16        fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
                               u16 ox_id, u32 port_id);
 
+u16    fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr,
+                       u32 d_id, u32 s_id, u16 ox_id,
+                       u8 reason_code, u8 reason_code_expl);
+
 u16        fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
                        u8 set_br_reg, u32 s_id, u16 ox_id);
 
 u16        fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
                                   u32 s_id, u16 ox_id,
                                   wwn_t port_name, wwn_t node_name,
-                                  u16 pdu_size);
+                                  u16 pdu_size, u16 bb_cr);
 
 u16        fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
                        u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
index c0353cdca92956f6d3d82070f9cd7f0ce1556f63..e07bd4745d8ba5b968ded24e81785096c1535b84 100644 (file)
 #include "bfa_modules.h"
 
 BFA_TRC_FILE(HAL, FCPIM);
-BFA_MODULE(fcpim);
 
 /*
  *  BFA ITNIM Related definitions
  */
 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
+static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
+static void bfa_ioim_lm_init(struct bfa_s *bfa);
 
 #define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
@@ -58,6 +60,14 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
        }                                                               \
 } while (0)
 
+#define bfa_ioim_rp_wwn(__ioim)                                                \
+       (((struct bfa_fcs_rport_s *)                                    \
+        (__ioim)->itnim->rport->rport_drv)->pwwn)
+
+#define bfa_ioim_lp_wwn(__ioim)                                                \
+       ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa),                  \
+       (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn)              \
+
 #define bfa_itnim_sler_cb(__itnim) do {                                        \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);      \
@@ -67,6 +77,18 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
        }                                                               \
 } while (0)
 
+enum bfa_ioim_lm_status {
+       BFA_IOIM_LM_PRESENT = 1,
+       BFA_IOIM_LM_LUN_NOT_SUP = 2,
+       BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
+       BFA_IOIM_LM_LUN_NOT_RDY = 4,
+};
+
+enum bfa_ioim_lm_ua_status {
+       BFA_IOIM_LM_UA_RESET = 0,
+       BFA_IOIM_LM_UA_SET = 1,
+};
+
 /*
  *  itnim state machine event
  */
@@ -123,6 +145,9 @@ enum bfa_ioim_event {
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
+       BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/*  lunmask lun not supported */
+       BFA_IOIM_SM_LM_RPL_DC = 20,     /*  lunmask report-lun data changed */
+       BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/*  lunmask lun not ready */
 };
 
 
@@ -220,6 +245,9 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
 static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
+static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
 
 /*
  * forward declaration of BFA IO state machine
@@ -287,24 +315,16 @@ static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
  * Compute and return memory needed by FCP(im) module.
  */
 static void
-bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-               u32 *dm_len)
+bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
 {
-       bfa_itnim_meminfo(cfg, km_len, dm_len);
+       bfa_itnim_meminfo(cfg, km_len);
 
        /*
         * IO memory
         */
-       if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
-               cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
-       else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
-               cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
-
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
 
-       *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
-
        /*
         * task management command memory
         */
@@ -315,52 +335,41 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 
 
 static void
-bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-               struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
+               struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = &fcp->fcpim;
+       struct bfa_s *bfa = fcp->bfa;
 
        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
 
+       fcpim->fcp              = fcp;
        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
-       fcpim->num_ioim_reqs  = cfg->fwcfg.num_ioim_reqs;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
 
-       bfa_itnim_attach(fcpim, meminfo);
-       bfa_tskim_attach(fcpim, meminfo);
-       bfa_ioim_attach(fcpim, meminfo);
+       bfa_itnim_attach(fcpim);
+       bfa_tskim_attach(fcpim);
+       bfa_ioim_attach(fcpim);
 }
 
 static void
-bfa_fcpim_detach(struct bfa_s *bfa)
+bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
 {
-}
-
-static void
-bfa_fcpim_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcpim_stop(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcpim_iocdisable(struct bfa_s *bfa)
-{
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;
 
+       /* Enqueue unused tskim resources to free_q */
+       list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);
+
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
@@ -370,7 +379,7 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
 void
 bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
 
        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
@@ -380,15 +389,146 @@ bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
 u16
 bfa_fcpim_path_tov_get(struct bfa_s *bfa)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
 
        return fcpim->path_tov / 1000;
 }
 
+#define bfa_fcpim_add_iostats(__l, __r, __stats)       \
+       (__l->__stats += __r->__stats)
+
+void
+bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
+               struct bfa_itnim_iostats_s *rstats)
+{
+       bfa_fcpim_add_iostats(lstats, rstats, total_ios);
+       bfa_fcpim_add_iostats(lstats, rstats, qresumes);
+       bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
+       bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
+       bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
+       bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
+       bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
+       bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
+       bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
+       bfa_fcpim_add_iostats(lstats, rstats, onlines);
+       bfa_fcpim_add_iostats(lstats, rstats, offlines);
+       bfa_fcpim_add_iostats(lstats, rstats, creates);
+       bfa_fcpim_add_iostats(lstats, rstats, deletes);
+       bfa_fcpim_add_iostats(lstats, rstats, create_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, sler_events);
+       bfa_fcpim_add_iostats(lstats, rstats, fw_create);
+       bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
+       bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
+       bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_success);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
+       bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, io_comps);
+       bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
+       bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
+       bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
+       bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
+       bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
+       bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
+       bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
+       bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
+       bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
+       bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
+}
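
bfa_fcpim_add_iostats() above works purely by token substitution: the field name passed as __stats is pasted into both the destination and source operands, so one macro covers every counter in bfa_itnim_iostats_s. A self-contained sketch of the same pattern with a hypothetical two-field struct:

#include <stdio.h>

struct iostats {
        unsigned long total_ios;
        unsigned long io_aborts;
};

/* the field name is substituted into both sides of the += */
#define add_iostats(__l, __r, __stats)  ((__l)->__stats += (__r)->__stats)

int main(void)
{
        struct iostats a = { 10, 1 }, b = { 5, 2 };

        add_iostats(&a, &b, total_ios);
        add_iostats(&a, &b, io_aborts);
        printf("%lu %lu\n", a.total_ios, a.io_aborts);  /* 15 3 */
        return 0;
}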
+
+bfa_status_t
+bfa_fcpim_port_iostats(struct bfa_s *bfa,
+               struct bfa_itnim_iostats_s *stats, u8 lp_tag)
+{
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+       struct list_head *qe, *qen;
+       struct bfa_itnim_s *itnim;
+
+       /* accumulate IO stats from itnim */
+       memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
+       list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+               itnim = (struct bfa_itnim_s *) qe;
+               if (itnim->rport->rport_info.lp_tag != lp_tag)
+                       continue;
+               bfa_fcpim_add_stats(stats, &(itnim->stats));
+       }
+       return BFA_STATUS_OK;
+}
+
+void
+bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
+{
+       struct bfa_itnim_latency_s *io_lat =
+                       &(ioim->itnim->ioprofile.io_latency);
+       u32 val, idx;
+
+       val = (u32)(jiffies - ioim->start_time);
+       idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
+       bfa_itnim_ioprofile_update(ioim->itnim, idx);
+
+       io_lat->count[idx]++;
+       io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
+       io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
+       io_lat->avg[idx] += val;
+}
+
+void
+bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
+{
+       ioim->start_time = jiffies;
+}
+
+bfa_status_t
+bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+{
+       struct bfa_itnim_s *itnim;
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+       struct list_head *qe, *qen;
+
+       /* clear accumulated IO stats for all itnims before profiling starts */
+       list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+               itnim = (struct bfa_itnim_s *) qe;
+               bfa_itnim_clear_stats(itnim);
+       }
+       fcpim->io_profile = BFA_TRUE;
+       fcpim->io_profile_start_time = time;
+       fcpim->profile_comp = bfa_ioim_profile_comp;
+       fcpim->profile_start = bfa_ioim_profile_start;
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_profile_off(struct bfa_s *bfa)
+{
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+       fcpim->io_profile = BFA_FALSE;
+       fcpim->io_profile_start_time = 0;
+       fcpim->profile_comp = NULL;
+       fcpim->profile_start = NULL;
+       return BFA_STATUS_OK;
+}
+
 u16
 bfa_fcpim_qdepth_get(struct bfa_s *bfa)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
 
        return fcpim->q_depth;
 }
@@ -990,8 +1130,7 @@ bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
 }
 
 void
-bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-               u32 *dm_len)
+bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
 {
        /*
         * ITN memory
@@ -1000,15 +1139,16 @@ bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 }
 
 void
-bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
 {
        struct bfa_s    *bfa = fcpim->bfa;
+       struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int     i, j;
 
        INIT_LIST_HEAD(&fcpim->itnim_q);
 
-       itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
+       itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;
 
        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
@@ -1030,7 +1170,7 @@ bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }
 
-       bfa_meminfo_kva(minfo) = (u8 *) itnim;
+       bfa_mem_kva_curp(fcp) = (u8 *) itnim;
 }
 
 void
@@ -1043,7 +1183,7 @@ bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
 static bfa_boolean_t
 bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
 {
-       struct bfi_itnim_create_req_s *m;
+       struct bfi_itn_create_req_s *m;
 
        itnim->msg_no++;
 
@@ -1056,8 +1196,8 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
                return BFA_FALSE;
        }
 
-       bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
-                       bfa_lpuid(itnim->bfa));
+       bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
+                       bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
@@ -1067,14 +1207,14 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(itnim->bfa, itnim->reqq);
+       bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
 }
 
 static bfa_boolean_t
 bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
 {
-       struct bfi_itnim_delete_req_s *m;
+       struct bfi_itn_delete_req_s *m;
 
        /*
         * check for room in queue to send request now
@@ -1085,15 +1225,15 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
                return BFA_FALSE;
        }
 
-       bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
-                       bfa_lpuid(itnim->bfa));
+       bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
+                       bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(itnim->bfa, itnim->reqq);
+       bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -1224,7 +1364,7 @@ bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
 static void
 bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
@@ -1250,8 +1390,8 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
 void
 bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-       union bfi_itnim_i2h_msg_u msg;
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+       union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;
 
        bfa_trc(bfa, m->mhdr.msg_id);
@@ -1259,7 +1399,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        msg.msg = m;
 
        switch (m->mhdr.msg_id) {
-       case BFI_ITNIM_I2H_CREATE_RSP:
+       case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
@@ -1267,7 +1407,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;
 
-       case BFI_ITNIM_I2H_DELETE_RSP:
+       case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
@@ -1275,7 +1415,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;
 
-       case BFI_ITNIM_I2H_SLER_EVENT:
+       case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
@@ -1295,9 +1435,11 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 struct bfa_itnim_s *
 bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;
 
+       bfa_itn_create(bfa, rport, bfa_itnim_isr);
+
        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);
 
@@ -1347,6 +1489,26 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
 }
 
+#define bfa_io_lat_clock_res_div       HZ
+#define bfa_io_lat_clock_res_mul       1000
+bfa_status_t
+bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+                       struct bfa_itnim_ioprofile_s *ioprofile)
+{
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
+       if (!fcpim->io_profile)
+               return BFA_STATUS_IOPROFILE_OFF;
+
+       itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+       itnim->ioprofile.io_profile_start_time =
+                               bfa_io_profile_start_time(itnim->bfa);
+       itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
+       itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
+       *ioprofile = itnim->ioprofile;
+
+       return BFA_STATUS_OK;
+}
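
bfa_itnim_get_ioprofile() exports latency buckets accumulated in jiffies together with clock_res_mul (1000) and clock_res_div (HZ), so a consumer can convert to milliseconds without knowing the kernel tick rate. A standalone sketch of that conversion; the HZ value used here is an assumption for the sake of the arithmetic.

#include <stdio.h>

#define ASSUMED_HZ      250         /* example tick rate, not taken from the patch */
#define CLOCK_RES_MUL   1000        /* bfa_io_lat_clock_res_mul */
#define CLOCK_RES_DIV   ASSUMED_HZ  /* bfa_io_lat_clock_res_div */

int main(void)
{
        unsigned long avg_jiffies = 75;   /* io_latency.avg[idx] / count[idx] */
        unsigned long avg_ms = avg_jiffies * CLOCK_RES_MUL / CLOCK_RES_DIV;

        printf("average latency ~ %lu ms\n", avg_ms);   /* 75 * 1000 / 250 = 300 */
        return 0;
}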
+
 void
 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
 {
@@ -1415,7 +1577,28 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-                               __bfa_cb_ioim_abort, ioim);
+                       __bfa_cb_ioim_abort, ioim);
+               break;
+
+       case BFA_IOIM_SM_LM_LUN_NOT_SUP:
+               bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+               bfa_ioim_move_to_comp_q(ioim);
+               bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+                       __bfa_cb_ioim_lm_lun_not_sup, ioim);
+               break;
+
+       case BFA_IOIM_SM_LM_RPL_DC:
+               bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+               bfa_ioim_move_to_comp_q(ioim);
+               bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+                               __bfa_cb_ioim_lm_rpl_dc, ioim);
+               break;
+
+       case BFA_IOIM_SM_LM_LUN_NOT_RDY:
+               bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+               bfa_ioim_move_to_comp_q(ioim);
+               bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+                       __bfa_cb_ioim_lm_lun_not_rdy, ioim);
                break;
 
        default:
@@ -1955,6 +2138,264 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
+/*
+ * This is called from bfa_fcpim_start after bfa_init(), once the driver has
+ * completed the flash read. It invalidates the stale contents of the LUN mask
+ * entries: unit attention state, rp tag and lp tag.
+ */
+static void
+bfa_ioim_lm_init(struct bfa_s *bfa)
+{
+       struct bfa_lun_mask_s *lunm_list;
+       int     i;
+
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+               return;
+
+       lunm_list = bfa_get_lun_mask_list(bfa);
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
+               lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+               lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+       }
+}
+
+/*
+ * Validate LUN for LUN masking
+ */
+static enum bfa_ioim_lm_status
+bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
+               struct bfa_rport_s *rp, struct scsi_lun lun)
+{
+       u8 i;
+       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+       struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
+
+       if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
+           (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
+               ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
+               return BFA_IOIM_LM_PRESENT;
+       }
+
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+
+               if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+                       continue;
+
+               if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
+                   scsilun_to_int((struct scsi_lun *)&lun))
+                   && (rp->rport_tag == lun_list[i].rp_tag)
+                   && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
+                                               lun_list[i].lp_tag)) {
+                       bfa_trc(ioim->bfa, lun_list[i].rp_tag);
+                       bfa_trc(ioim->bfa, lun_list[i].lp_tag);
+                       bfa_trc(ioim->bfa, scsilun_to_int(
+                               (struct scsi_lun *)&lun_list[i].lun));
+
+                       if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
+                           ((cdb->scsi_cdb[0] != INQUIRY) &&
+                           (cdb->scsi_cdb[0] != REPORT_LUNS))) {
+                               lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
+                               return BFA_IOIM_LM_RPL_DATA_CHANGED;
+                       }
+
+                       if (cdb->scsi_cdb[0] == REPORT_LUNS)
+                               ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
+
+                       return BFA_IOIM_LM_PRESENT;
+               }
+       }
+
+       if ((cdb->scsi_cdb[0] == INQUIRY) &&
+           (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
+               ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
+               return BFA_IOIM_LM_PRESENT;
+       }
+
+       if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
+               return BFA_IOIM_LM_LUN_NOT_RDY;
+
+       return BFA_IOIM_LM_LUN_NOT_SUP;
+}
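
bfa_ioim_lm_check() resolves each outgoing command against the LUN-mask table in a fixed order: INQUIRY or REPORT LUNS to LUN 0 always passes, a masked-in LUN with a pending unit attention reports "report luns data changed" (unless the command is INQUIRY or REPORT LUNS), TEST UNIT READY to a masked-out LUN reports "not ready", and anything else to a masked-out LUN is "not supported". The condensed standalone sketch below restates that decision order; it is illustrative, not driver code.

#include <stdio.h>

enum lm_status { LM_PRESENT, LM_LUN_NOT_SUP, LM_RPL_DATA_CHANGED, LM_LUN_NOT_RDY };

#define OP_TEST_UNIT_READY 0x00
#define OP_INQUIRY         0x12
#define OP_REPORT_LUNS     0xA0

static enum lm_status lm_check(unsigned char opcode, int lun_is_masked_in,
                               int unit_attention_pending, int lun_is_zero)
{
        if (opcode == OP_REPORT_LUNS && lun_is_zero)
                return LM_PRESENT;
        if (lun_is_masked_in) {
                if (unit_attention_pending &&
                    opcode != OP_INQUIRY && opcode != OP_REPORT_LUNS)
                        return LM_RPL_DATA_CHANGED;
                return LM_PRESENT;
        }
        if (opcode == OP_INQUIRY && lun_is_zero)
                return LM_PRESENT;
        if (opcode == OP_TEST_UNIT_READY)
                return LM_LUN_NOT_RDY;
        return LM_LUN_NOT_SUP;
}

int main(void)
{
        printf("%d %d\n",
               lm_check(0x28 /* READ(10) */, 0, 0, 0),  /* 1: not supported */
               lm_check(OP_TEST_UNIT_READY, 0, 0, 0));  /* 3: not ready */
        return 0;
}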
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
+{
+       return BFA_TRUE;
+}
+
+static void
+bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
+               int buf_lun_cnt)
+{
+       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+       struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
+       struct scsi_lun lun;
+       int i, j;
+
+       bfa_trc(ioim->bfa, buf_lun_cnt);
+       for (j = 0; j < buf_lun_cnt; j++) {
+               lun = *((struct scsi_lun *)(lun_data + j));
+               for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+                       if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+                               continue;
+                       if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
+                           (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
+                           (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
+                               == scsilun_to_int((struct scsi_lun *)&lun))) {
+                               lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
+                               break;
+                       }
+               } /* next lun in mask DB */
+       } /* next lun in buf */
+}
+
+static int
+bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
+               struct scsi_report_luns_data_s *rl)
+{
+       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+       struct scatterlist *sg = scsi_sglist(cmnd);
+       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+       struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
+       int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
+       int lun_across_sg_bytes, bytes_from_next_buf;
+       u64     last_lun, temp_last_lun;
+
+       /* fetch luns from the first sg element */
+       bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
+                       (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
+
+       /* fetch luns from multiple sg elements */
+       scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
+               if (sgeid == 0) {
+                       prev_sg_len = sg_dma_len(sg);
+                       prev_rl_data = (struct scsi_lun *)
+                                       phys_to_virt(sg_dma_address(sg));
+                       continue;
+               }
+
+               /* handle a LUN entry split across the previous and current sg */
+               lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
+               if (lun_across_sg_bytes) {
+                       bfa_trc(ioim->bfa, lun_across_sg_bytes);
+                       bfa_stats(ioim->itnim, lm_lun_across_sg);
+                       bytes_from_next_buf = sizeof(struct scsi_lun) -
+                                             lun_across_sg_bytes;
+
+                       /* from next buf take higher bytes */
+                       temp_last_lun = *((u64 *)
+                                         phys_to_virt(sg_dma_address(sg)));
+                       last_lun = temp_last_lun >>
+                                   (lun_across_sg_bytes * BITS_PER_BYTE);
+
+                       /* from prev buf take higher bytes */
+                       temp_last_lun = *((u64 *)(prev_rl_data +
+                                         (prev_sg_len - lun_across_sg_bytes)));
+                       temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
+                       last_lun = last_lun | (temp_last_lun <<
+                                  (bytes_from_next_buf * BITS_PER_BYTE));
+
+                       bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
+               } else
+                       bytes_from_next_buf = 0;
+
+               *pgdlen += sg_dma_len(sg);
+               prev_sg_len = sg_dma_len(sg);
+               prev_rl_data = (struct scsi_lun *)
+                               phys_to_virt(sg_dma_address(sg));
+               bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
+                               bytes_from_next_buf,
+                               sg_dma_len(sg) / sizeof(struct scsi_lun));
+       }
+
+       /* update the report luns data - based on fetched luns */
+       sg = scsi_sglist(cmnd);
+       base_rl_data = (struct scsi_lun *)rl->lun;
+       base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
+       for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
+                       base_rl_data[j] = lun_list[i].lun;
+                       lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
+                       j++;
+                       lun_fetched_cnt++;
+               }
+
+               if (j > base_count) {
+                       j = 0;
+                       sg = sg_next(sg);
+                       base_rl_data = (struct scsi_lun *)
+                                       phys_to_virt(sg_dma_address(sg));
+                       base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
+               }
+       }
+
+       bfa_trc(ioim->bfa, lun_fetched_cnt);
+       return lun_fetched_cnt;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
+{
+       struct scsi_inquiry_data_s *inq;
+       struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
+
+       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
+       inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
+
+       bfa_trc(ioim->bfa, inq->device_type);
+       inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
+       return 0;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
+{
+       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+       struct scatterlist *sg = scsi_sglist(cmnd);
+       struct bfi_ioim_rsp_s *m;
+       struct scsi_report_luns_data_s *rl = NULL;
+       int lun_count = 0, lun_fetched_cnt = 0;
+       u32 residue, pgdlen = 0;
+
+       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
+       if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
+               return BFA_TRUE;
+
+       m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
+       if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
+               return BFA_TRUE;
+
+       pgdlen = sg_dma_len(sg);
+       bfa_trc(ioim->bfa, pgdlen);
+       rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
+       lun_count = be32_to_cpu(rl->lun_list_length) / sizeof(struct scsi_lun);
+       lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
+
+       if (lun_count == lun_fetched_cnt)
+               return BFA_TRUE;
+
+       bfa_trc(ioim->bfa, lun_count);
+       bfa_trc(ioim->bfa, lun_fetched_cnt);
+       bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
+
+       if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
+               rl->lun_list_length = cpu_to_be32(lun_fetched_cnt *
+                                     sizeof(struct scsi_lun));
+       else
+               bfa_stats(ioim->itnim, lm_small_buf_addresidue);
+
+       bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
+       bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
+
+       residue = be32_to_cpu(m->residue);
+       residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
+       bfa_stats(ioim->itnim, lm_wire_residue_changed);
+       m->residue = cpu_to_be32(residue);
+       bfa_trc(ioim->bfa, ioim->nsges);
+       return BFA_FALSE;
+}
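
bfa_ioim_lm_proc_rpl_data() above rewrites the REPORT LUNS payload so that only LUNs present in the mask survive, then fixes up lun_list_length and the wire residue. The length field is a big-endian byte count, so the LUN count is that length divided by the 8-byte entry size. A standalone sketch of the header parsing (illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohl() for the big-endian length field */

struct rpl_hdr {                /* simplified REPORT LUNS header */
        uint32_t lun_list_length;   /* byte count of the LUN entries, BE */
        uint32_t reserved;
};

int main(void)
{
        unsigned char buf[8 + 16] = { 0x00, 0x00, 0x00, 0x10 }; /* 16 bytes => 2 LUNs */
        struct rpl_hdr hdr;

        memcpy(&hdr, buf, sizeof(hdr));
        printf("lun count = %u\n", ntohl(hdr.lun_list_length) / 8);  /* prints 2 */
        return 0;
}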
 
 static void
 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
@@ -1991,7 +2432,8 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
                if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
                                        m->sns_len) {
                        sns_len = m->sns_len;
-                       snsinfo = ioim->iosp->snsinfo;
+                       snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
+                                               ioim->iotag);
                }
 
                /*
@@ -2012,6 +2454,299 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
                          m->scsi_status, sns_len, snsinfo, residue);
 }
 
+static void
+__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
+{
+       struct bfa_ioim_s *ioim = cbarg;
+       int sns_len = 0xD;
+       u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+       struct scsi_sense_s *snsinfo;
+
+       if (!complete) {
+               bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+               return;
+       }
+
+       snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
+                                       ioim->fcpim->fcp, ioim->iotag);
+       snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+       snsinfo->add_sense_length = 0xa;
+       snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
+       snsinfo->sense_key = ILLEGAL_REQUEST;
+       bfa_trc(ioim->bfa, residue);
+       bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+                       SCSI_STATUS_CHECK_CONDITION, sns_len,
+                       (u8 *)snsinfo, residue);
+}
+
+static void
+__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
+{
+       struct bfa_ioim_s *ioim = cbarg;
+       int sns_len = 0xD;
+       u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+       struct scsi_sense_s *snsinfo;
+
+       if (!complete) {
+               bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+               return;
+       }
+
+       snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
+                                                      ioim->iotag);
+       snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+       snsinfo->sense_key = UNIT_ATTENTION;
+       snsinfo->asc = SCSI_ASC_TOCC;
+       snsinfo->add_sense_length = 0x6;
+       snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
+       bfa_trc(ioim->bfa, residue);
+       bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+                       SCSI_STATUS_CHECK_CONDITION, sns_len,
+                       (u8 *)snsinfo, residue);
+}
+
+static void
+__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
+{
+       struct bfa_ioim_s *ioim = cbarg;
+       int sns_len = 0xD;
+       u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+       struct scsi_sense_s *snsinfo;
+
+       if (!complete) {
+               bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+               return;
+       }
+
+       snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
+                                       ioim->fcpim->fcp, ioim->iotag);
+       snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+       snsinfo->add_sense_length = 0xa;
+       snsinfo->sense_key = NOT_READY;
+       snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
+       snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
+       bfa_trc(ioim->bfa, residue);
+       bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+                       SCSI_STATUS_CHECK_CONDITION, sns_len,
+                       (u8 *)snsinfo, residue);
+}
+
+void
+bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
+                       u16 rp_tag, u8 lp_tag)
+{
+       struct bfa_lun_mask_s *lun_list;
+       u8      i;
+
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+               return;
+
+       lun_list = bfa_get_lun_mask_list(bfa);
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+                       if ((lun_list[i].lp_wwn == lp_wwn) &&
+                           (lun_list[i].rp_wwn == rp_wwn)) {
+                               lun_list[i].rp_tag = rp_tag;
+                               lun_list[i].lp_tag = lp_tag;
+                       }
+               }
+       }
+}
+
+/*
+ * set UA for all active luns in LM DB
+ */
+static void
+bfa_ioim_lm_set_ua(struct bfa_s *bfa)
+{
+       struct bfa_lun_mask_s   *lunm_list;
+       int     i;
+
+       lunm_list = bfa_get_lun_mask_list(bfa);
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+                       continue;
+               lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+       }
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
+{
+       struct bfa_lunmask_cfg_s        *lun_mask;
+
+       bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+               return BFA_STATUS_FAILED;
+
+       if (bfa_get_lun_mask_status(bfa) == update)
+               return BFA_STATUS_NO_CHANGE;
+
+       lun_mask = bfa_get_lun_mask(bfa);
+       lun_mask->status = update;
+
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
+               bfa_ioim_lm_set_ua(bfa);
+
+       return  bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
+{
+       int i;
+       struct bfa_lun_mask_s   *lunm_list;
+
+       bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+               return BFA_STATUS_FAILED;
+
+       lunm_list = bfa_get_lun_mask_list(bfa);
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+                       if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
+                               bfa_rport_unset_lunmask(bfa,
+                                 BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
+               }
+       }
+
+       memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
+       return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
+{
+       struct bfa_lunmask_cfg_s *lun_mask;
+
+       bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+               return BFA_STATUS_FAILED;
+
+       lun_mask = bfa_get_lun_mask(bfa);
+       memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+                     wwn_t rpwwn, struct scsi_lun lun)
+{
+       struct bfa_lun_mask_s *lunm_list;
+       struct bfa_rport_s *rp = NULL;
+       int i, free_index = MAX_LUN_MASK_CFG + 1;
+       struct bfa_fcs_lport_s *port = NULL;
+       struct bfa_fcs_rport_s *rp_fcs;
+
+       bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+               return BFA_STATUS_FAILED;
+
+       port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
+                                  vf_id, *pwwn);
+       if (port) {
+               *pwwn = port->port_cfg.pwwn;
+               rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+               rp = rp_fcs->bfa_rport;
+       }
+
+       lunm_list = bfa_get_lun_mask_list(bfa);
+       /* find a free slot and check whether the entry already exists */
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+                       free_index = i;
+               if ((lunm_list[i].lp_wwn == *pwwn) &&
+                   (lunm_list[i].rp_wwn == rpwwn) &&
+                   (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+                    scsilun_to_int((struct scsi_lun *)&lun)))
+                       return  BFA_STATUS_ENTRY_EXISTS;
+       }
+
+       if (free_index > MAX_LUN_MASK_CFG)
+               return BFA_STATUS_MAX_ENTRY_REACHED;
+
+       if (rp) {
+               lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
+                                                  rp->rport_info.local_pid);
+               lunm_list[free_index].rp_tag = rp->rport_tag;
+       } else {
+               lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
+               lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
+       }
+
+       lunm_list[free_index].lp_wwn = *pwwn;
+       lunm_list[free_index].rp_wwn = rpwwn;
+       lunm_list[free_index].lun = lun;
+       lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
+
+       /* set UA for all LUNs of this lport/rport pair */
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if ((lunm_list[i].lp_wwn == *pwwn) &&
+                   (lunm_list[i].rp_wwn == rpwwn))
+                       lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+       }
+
+       return bfa_dconf_update(bfa);
+}
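
bfa_fcpim_lunmask_add() above makes a single pass over the fixed-size table, remembering an inactive slot while also rejecting duplicates, and only fills the slot once the scan is clean. A standalone sketch of that scan with simplified types and an arbitrary table size rather than the driver's structures:

#include <stdio.h>
#include <string.h>

#define MAX_CFG   8
#define ST_ACTIVE 1

struct mask_entry {
        int           state;
        unsigned long lp_wwn, rp_wwn;
        unsigned int  lun;
};

/* 0 = added, -1 = entry already exists, -2 = table full */
static int mask_add(struct mask_entry *tbl, unsigned long lp,
                    unsigned long rp, unsigned int lun)
{
        int i, free_index = MAX_CFG + 1;   /* sentinel: no free slot found */

        for (i = 0; i < MAX_CFG; i++) {
                if (tbl[i].state != ST_ACTIVE)
                        free_index = i;
                if (tbl[i].lp_wwn == lp && tbl[i].rp_wwn == rp &&
                    tbl[i].lun == lun)
                        return -1;
        }
        if (free_index > MAX_CFG)
                return -2;

        tbl[free_index].state  = ST_ACTIVE;
        tbl[free_index].lp_wwn = lp;
        tbl[free_index].rp_wwn = rp;
        tbl[free_index].lun    = lun;
        return 0;
}

int main(void)
{
        struct mask_entry tbl[MAX_CFG];

        memset(tbl, 0, sizeof(tbl));
        printf("%d\n", mask_add(tbl, 0x10, 0x20, 5));   /* 0: added      */
        printf("%d\n", mask_add(tbl, 0x10, 0x20, 5));   /* -1: duplicate */
        return 0;
}
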
+
+bfa_status_t
+bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+                        wwn_t rpwwn, struct scsi_lun lun)
+{
+       struct bfa_lun_mask_s   *lunm_list;
+       struct bfa_rport_s      *rp = NULL;
+       struct bfa_fcs_lport_s *port = NULL;
+       struct bfa_fcs_rport_s *rp_fcs;
+       int     i;
+
+       /* in min cfg lunm_list could be NULL, but no commands should be running */
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+               return BFA_STATUS_FAILED;
+
+       bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+       bfa_trc(bfa, *pwwn);
+       bfa_trc(bfa, rpwwn);
+       bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
+
+       if (*pwwn == 0) {
+               port = bfa_fcs_lookup_port(
+                               &((struct bfad_s *)bfa->bfad)->bfa_fcs,
+                               vf_id, *pwwn);
+               if (port) {
+                       *pwwn = port->port_cfg.pwwn;
+                       rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+                       rp = rp_fcs->bfa_rport;
+               }
+       }
+
+       lunm_list = bfa_get_lun_mask_list(bfa);
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if ((lunm_list[i].lp_wwn == *pwwn) &&
+                   (lunm_list[i].rp_wwn == rpwwn) &&
+                   (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+                    scsilun_to_int((struct scsi_lun *)&lun))) {
+                       lunm_list[i].lp_wwn = 0;
+                       lunm_list[i].rp_wwn = 0;
+                       int_to_scsilun(0, &lunm_list[i].lun);
+                       lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
+                       if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
+                               lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+                               lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+                       }
+                       return bfa_dconf_update(bfa);
+               }
+       }
+
+       /* set UA for all LUNs of this lport/rport pair */
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if ((lunm_list[i].lp_wwn == *pwwn) &&
+                   (lunm_list[i].rp_wwn == rpwwn))
+                       lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+       }
+
+       return BFA_STATUS_ENTRY_NOT_EXISTS;
+}
+
 static void
 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
 {
@@ -2022,6 +2757,7 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
                return;
        }
 
+       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
                          0, 0, NULL, 0);
 }
@@ -2037,6 +2773,7 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
                return;
        }
 
+       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
                          0, 0, NULL, 0);
 }
@@ -2051,6 +2788,7 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
                return;
        }
 
+       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
 }
 
@@ -2189,12 +2927,12 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
         */
        switch (m->cmnd.iodir) {
        case FCP_IODIR_READ:
-               bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
+               bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
                bfa_stats(itnim, input_reqs);
                ioim->itnim->stats.rd_throughput += fcp_dl;
                break;
        case FCP_IODIR_WRITE:
-               bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
+               bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
                bfa_stats(itnim, output_reqs);
                ioim->itnim->stats.wr_throughput += fcp_dl;
                break;
@@ -2202,16 +2940,16 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
                bfa_stats(itnim, input_reqs);
                bfa_stats(itnim, output_reqs);
        default:
-               bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
+               bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
        }
        if (itnim->seq_rec ||
            (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
-               bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
+               bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(ioim->bfa, ioim->reqq);
+       bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -2269,14 +3007,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
        else
                msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
 
-       bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
+       bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
        m->io_tag    = cpu_to_be16(ioim->iotag);
        m->abort_tag = ++ioim->abort_tag;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(ioim->bfa, ioim->reqq);
+       bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -2360,46 +3098,32 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
  * Memory allocation and initialization.
  */
 void
-bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
 {
        struct bfa_ioim_s               *ioim;
+       struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_ioim_sp_s    *iosp;
        u16             i;
-       u8                      *snsinfo;
-       u32             snsbufsz;
 
        /*
         * claim memory first
         */
-       ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
+       ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
        fcpim->ioim_arr = ioim;
-       bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
+       bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
 
-       iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
+       iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
        fcpim->ioim_sp_arr = iosp;
-       bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
-
-       /*
-        * Claim DMA memory for per IO sense data.
-        */
-       snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
-       fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
-       bfa_meminfo_dma_phys(minfo) += snsbufsz;
-
-       fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
-       bfa_meminfo_dma_virt(minfo) += snsbufsz;
-       snsinfo = fcpim->snsbase.kva;
-       bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
+       bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
 
        /*
         * Initialize ioim free queues
         */
-       INIT_LIST_HEAD(&fcpim->ioim_free_q);
        INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
        INIT_LIST_HEAD(&fcpim->ioim_comp_q);
 
-       for (i = 0; i < fcpim->num_ioim_reqs;
-            i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
+       for (i = 0; i < fcpim->fcp->num_ioim_reqs;
+            i++, ioim++, iosp++) {
                /*
                 * initialize IOIM
                 */
@@ -2408,22 +3132,20 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
                ioim->bfa     = fcpim->bfa;
                ioim->fcpim   = fcpim;
                ioim->iosp    = iosp;
-               iosp->snsinfo = snsinfo;
+               ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
                INIT_LIST_HEAD(&ioim->sgpg_q);
                bfa_reqq_winit(&ioim->iosp->reqq_wait,
                                   bfa_ioim_qresume, ioim);
                bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
                                   bfa_ioim_sgpg_alloced, ioim);
                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
-
-               list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
        }
 }
 
 void
 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
        struct bfa_ioim_s *ioim;
        u16     iotag;
@@ -2448,6 +3170,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                        evt = BFA_IOIM_SM_DONE;
                else
                        evt = BFA_IOIM_SM_COMP;
+               ioim->proc_rsp_data(ioim);
                break;
 
        case BFI_IOIM_STS_TIMEDOUT:
@@ -2483,6 +3206,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                if (rsp->abort_tag != ioim->abort_tag) {
                        bfa_trc(ioim->bfa, rsp->abort_tag);
                        bfa_trc(ioim->bfa, ioim->abort_tag);
+                       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
                        return;
                }
 
@@ -2501,13 +3225,14 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                WARN_ON(1);
        }
 
+       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_sm_send_event(ioim, evt);
 }
 
 void
 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
        struct bfa_ioim_s *ioim;
        u16     iotag;
@@ -2518,7 +3243,16 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
 
        bfa_ioim_cb_profile_comp(fcpim, ioim);
-       bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+
+       if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED)  {
+               bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+               return;
+       }
+
+       if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
+               bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+       else
+               bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
 }
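
The good-completion ISR above now consults a per-I/O proc_rsp_data hook when LUN masking is enabled, and demotes the event from COMP_GOOD to COMP when the hook reports that the response data had to be adjusted. A minimal sketch of that function-pointer dispatch; the hook names and the lun_needs_ua field are invented for illustration, not the driver's:

#include <stdio.h>

enum { EV_COMP_GOOD, EV_COMP };

struct io;
typedef int (*proc_rsp_fn)(struct io *io);

struct io {
        proc_rsp_fn proc_rsp_data;
        int         lun_needs_ua;   /* invented stand-in for LUN-mask state */
};

/* default hook: nothing to adjust, keep the good completion */
static int proc_rsp_dummy(struct io *io) { (void)io; return 1; }

/* LUN-mask hook: demote the completion when a unit attention must be built */
static int proc_rsp_lm(struct io *io) { return !io->lun_needs_ua; }

static void good_comp_isr(struct io *io, int lunmask_enabled)
{
        int evt = EV_COMP_GOOD;

        if (lunmask_enabled && !io->proc_rsp_data(io))
                evt = EV_COMP;
        printf("event = %s\n", evt == EV_COMP_GOOD ? "COMP_GOOD" : "COMP");
}

int main(void)
{
        struct io a = { proc_rsp_dummy, 0 };
        struct io b = { proc_rsp_lm, 1 };

        good_comp_isr(&a, 0);   /* COMP_GOOD */
        good_comp_isr(&b, 1);   /* COMP      */
        return 0;
}
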
 
 /*
@@ -2573,18 +3307,21 @@ struct bfa_ioim_s *
 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
                struct bfa_itnim_s *itnim, u16 nsges)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_ioim_s *ioim;
+       struct bfa_iotag_s *iotag = NULL;
 
        /*
         * allocate IOIM resource
         */
-       bfa_q_deq(&fcpim->ioim_free_q, &ioim);
-       if (!ioim) {
+       bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
+       if (!iotag) {
                bfa_stats(itnim, no_iotags);
                return NULL;
        }
 
+       ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
+
        ioim->dio = dio;
        ioim->itnim = itnim;
        ioim->nsges = nsges;
@@ -2601,7 +3338,8 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
 void
 bfa_ioim_free(struct bfa_ioim_s *ioim)
 {
-       struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
+       struct bfa_fcpim_s *fcpim = ioim->fcpim;
+       struct bfa_iotag_s *iotag;
 
        if (ioim->nsgpgs > 0)
                bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
@@ -2610,13 +3348,51 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
        fcpim->ios_active--;
 
        ioim->iotag &= BFA_IOIM_IOTAG_MASK;
+
+       WARN_ON(!(ioim->iotag <
+               (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
+       iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
+
+       if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
+               list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
+       else
+               list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
+
        list_del(&ioim->qe);
-       list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
 }
 
 void
 bfa_ioim_start(struct bfa_ioim_s *ioim)
 {
+       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+       struct bfa_lps_s        *lps;
+       enum bfa_ioim_lm_status status;
+       struct scsi_lun scsilun;
+
+       if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
+               lps = BFA_IOIM_TO_LPS(ioim);
+               int_to_scsilun(cmnd->device->lun, &scsilun);
+               status = bfa_ioim_lm_check(ioim, lps,
+                               ioim->itnim->rport, scsilun);
+               if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
+                       bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
+                       bfa_stats(ioim->itnim, lm_lun_not_rdy);
+                       return;
+               }
+
+               if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
+                       bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
+                       bfa_stats(ioim->itnim, lm_lun_not_sup);
+                       return;
+               }
+
+               if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
+                       bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
+                       bfa_stats(ioim->itnim, lm_rpl_data_changed);
+                       return;
+               }
+       }
+
        bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
        /*
@@ -3021,7 +3797,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
         * build i/o request message next
         */
        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
-                       bfa_lpuid(tskim->bfa));
+                       bfa_fn_lpu(tskim->bfa));
 
        m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
        m->itn_fhdl = tskim->itnim->rport->fw_handle;
@@ -3032,7 +3808,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(tskim->bfa, itnim->reqq);
+       bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -3056,14 +3832,14 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
         * build i/o request message next
         */
        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
-                       bfa_lpuid(tskim->bfa));
+                       bfa_fn_lpu(tskim->bfa));
 
        m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(tskim->bfa, itnim->reqq);
+       bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
 }
 
@@ -3129,14 +3905,16 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
  * Memory allocation and initialization.
  */
 void
-bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
 {
        struct bfa_tskim_s *tskim;
+       struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        u16     i;
 
        INIT_LIST_HEAD(&fcpim->tskim_free_q);
+       INIT_LIST_HEAD(&fcpim->tskim_unused_q);
 
-       tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
+       tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
        fcpim->tskim_arr = tskim;
 
        for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
@@ -3155,13 +3933,13 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
                list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
        }
 
-       bfa_meminfo_kva(minfo) = (u8 *) tskim;
+       bfa_mem_kva_curp(fcp) = (u8 *) tskim;
 }
 
 void
 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
        struct bfa_tskim_s *tskim;
        u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
@@ -3188,7 +3966,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 struct bfa_tskim_s *
 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
 {
-       struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_tskim_s *tskim;
 
        bfa_q_deq(&fcpim->tskim_free_q, &tskim);
@@ -3233,3 +4011,221 @@ bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
        list_add_tail(&tskim->qe, &itnim->tsk_q);
        bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
 }
+
+void
+bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
+{
+       struct bfa_fcpim_s      *fcpim = BFA_FCPIM(bfa);
+       struct list_head        *qe;
+       int     i;
+
+       for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
+               bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
+               list_add_tail(qe, &fcpim->tskim_unused_q);
+       }
+}
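
bfa_tskim_res_recfg() above, like bfa_fcp_res_recfg() later in this patch, parks the surplus between the driver-configured count and the firmware-reported count on an "unused" queue so those entries can no longer be allocated. A compilable sketch of that pattern, with a plain singly-linked list standing in for list_head:

#include <stdio.h>

struct node { struct node *next; int id; };

static struct node *pop(struct node **head)
{
        struct node *n = *head;

        if (n)
                *head = n->next;
        return n;
}

static void push(struct node **head, struct node *n)
{
        n->next = *head;
        *head = n;
}

/* Move (num_cfg - num_fw) entries from the free list to the unused list. */
static void res_recfg(struct node **free_q, struct node **unused_q,
                      int num_cfg, int num_fw)
{
        int i;

        for (i = 0; i < num_cfg - num_fw; i++) {
                struct node *n = pop(free_q);

                if (!n)
                        break;
                push(unused_q, n);
        }
}

int main(void)
{
        struct node pool[4] = { { 0 } };
        struct node *free_q = NULL, *unused_q = NULL, *p;
        int i, left = 0;

        for (i = 0; i < 4; i++) {
                pool[i].id = i;
                push(&free_q, &pool[i]);
        }
        res_recfg(&free_q, &unused_q, 4, 3);       /* firmware backs only 3 */
        for (p = free_q; p; p = p->next)
                left++;
        printf("free entries left: %d\n", left);   /* 3 */
        return 0;
}
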
+
+/* BFA FCP module - parent module for fcpim */
+
+BFA_MODULE(fcp);
+
+static void
+bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+       struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     nsegs, idx, per_seg_ios, num_io_req;
+       u32     km_len = 0;
+
+       /*
+        * Zero is an allowed config value for num_ioim_reqs and num_fwtio_reqs.
+        * If the values are non-zero, clamp them to the supported limits.
+        */
+       if (cfg->fwcfg.num_ioim_reqs &&
+           cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
+               cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
+       else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
+               cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+
+       if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
+               cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+
+       num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+       if (num_io_req > BFA_IO_MAX) {
+               if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
+                       cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
+                       cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
+               } else if (cfg->fwcfg.num_fwtio_reqs)
+                       cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+               else
+                       cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+       }
+
+       bfa_fcpim_meminfo(cfg, &km_len);
+
+       num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+       km_len += num_io_req * sizeof(struct bfa_iotag_s);
+       km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
+
+       /* dma memory */
+       nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+       per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
+
+       bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+               if (num_io_req >= per_seg_ios) {
+                       num_io_req -= per_seg_ios;
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               per_seg_ios * BFI_IOIM_SNSLEN);
+               } else
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               num_io_req * BFI_IOIM_SNSLEN);
+       }
+
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, fcp_kva, km_len);
+}
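
The meminfo path above sizes the per-I/O sense-buffer DMA area in fixed segments, giving each segment either a full complement of buffers or whatever remains. The arithmetic, worked with assumed values (a 128 KB DMA segment and a 256-byte sense buffer; the real BFI_MEM_* macros and BFI_IOIM_SNSLEN may differ):

#include <stdio.h>

#define SEG_SZ        (128 * 1024)           /* assumed per-segment DMA size */
#define IOIM_SNSLEN   256                    /* assumed sense length per I/O */
#define NREQS_SEG     (SEG_SZ / IOIM_SNSLEN)
#define NSEGS(nreq)   (((nreq) + NREQS_SEG - 1) / NREQS_SEG)

int main(void)
{
        unsigned int num_io_req = 2000;    /* e.g. num_ioim_reqs + num_fwtio_reqs */
        unsigned int nsegs = NSEGS(num_io_req);
        unsigned int i, left = num_io_req;

        printf("%u requests -> %u DMA segment(s)\n", num_io_req, nsegs);

        /* each segment gets per_seg_ios buffers; the last one gets the rest */
        for (i = 0; i < nsegs; i++) {
                unsigned int n = left >= NREQS_SEG ? NREQS_SEG : left;

                printf("seg %u: %u bytes\n", i, n * IOIM_SNSLEN);
                left -= n;
        }
        return 0;
}
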
+
+static void
+bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+               struct bfa_pcidev_s *pcidev)
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     idx, nsegs, num_io_req;
+
+       fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
+       fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
+       fcp->num_itns   = cfg->fwcfg.num_rports;
+       fcp->bfa = bfa;
+
+       /*
+        * Set up the pool of sense-buffer base addresses (snsbase) that is
+        * passed to the firmware as part of bfi_iocfc_cfg_s.
+        */
+       num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+       nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+
+       bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+
+               if (!bfa_mem_dma_virt(seg_ptr))
+                       break;
+
+               fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
+               fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
+               bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
+       }
+
+       bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
+
+       bfa_iotag_attach(fcp);
+
+       fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
+       bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
+                       (fcp->num_itns * sizeof(struct bfa_itn_s));
+       memset(fcp->itn_arr, 0,
+                       (fcp->num_itns * sizeof(struct bfa_itn_s)));
+}
+
+static void
+bfa_fcp_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcp_start(struct bfa_s *bfa)
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+       /*
+        * bfa_init() with flash read is complete. Now invalidate the stale
+        * LUN mask state: unit attention, rport tag and lport tag.
+        */
+       bfa_ioim_lm_init(fcp->bfa);
+}
+
+static void
+bfa_fcp_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcp_iocdisable(struct bfa_s *bfa)
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+       /* Enqueue unused ioim resources to free_q */
+       list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
+
+       bfa_fcpim_iocdisable(fcp);
+}
+
+void
+bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
+{
+       struct bfa_fcp_mod_s    *mod = BFA_FCP_MOD(bfa);
+       struct list_head        *qe;
+       int     i;
+
+       for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
+               bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
+               list_add_tail(qe, &mod->iotag_unused_q);
+       }
+}
+
+void
+bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+               void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+       struct bfa_itn_s *itn;
+
+       itn =  BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
+       itn->isr = isr;
+}
+
+/*
+ * Itn interrupt processing.
+ */
+void
+bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+       struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+       union bfi_itn_i2h_msg_u msg;
+       struct bfa_itn_s *itn;
+
+       msg.msg = m;
+       itn =  BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
+
+       if (itn->isr)
+               itn->isr(bfa, m);
+       else
+               WARN_ON(1);
+}
+
+void
+bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
+{
+       struct bfa_iotag_s *iotag;
+       u16     num_io_req, i;
+
+       iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
+       fcp->iotag_arr = iotag;
+
+       INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
+       INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
+       INIT_LIST_HEAD(&fcp->iotag_unused_q);
+
+       num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
+       for (i = 0; i < num_io_req; i++, iotag++) {
+               memset(iotag, 0, sizeof(struct bfa_iotag_s));
+               iotag->tag = i;
+               if (i < fcp->num_ioim_reqs)
+                       list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
+               else
+                       list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
+       }
+
+       bfa_mem_kva_curp(fcp) = (u8 *) iotag;
+}
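
bfa_iotag_attach() above carves one contiguous tag array into two free lists: tags below num_ioim_reqs serve host-initiated I/O and the rest serve target-mode (tio) I/O. A standalone sketch of that split; the intrusive singly-linked list (LIFO here, where the driver queues at the tail) and the field names are simplifications:

#include <stdio.h>
#include <stdlib.h>

struct iotag {
        struct iotag  *next;    /* free-list link   */
        unsigned short tag;     /* firmware I/O tag */
};

struct fcp_mod {
        struct iotag *ioim_free;   /* tags for host-initiated I/O */
        struct iotag *tio_free;    /* tags for target-mode I/O    */
        struct iotag *arr;
        unsigned int  num_ioim, num_tio;
};

static void iotag_attach(struct fcp_mod *fcp)
{
        unsigned int i, total = fcp->num_ioim + fcp->num_tio;

        for (i = 0; i < total; i++) {
                struct iotag *t = &fcp->arr[i];
                struct iotag **head =
                        (i < fcp->num_ioim) ? &fcp->ioim_free : &fcp->tio_free;

                t->tag = (unsigned short)i;
                t->next = *head;        /* LIFO push; the driver adds at the tail */
                *head = t;
        }
}

int main(void)
{
        struct fcp_mod fcp = { .num_ioim = 4, .num_tio = 2 };

        fcp.arr = calloc(fcp.num_ioim + fcp.num_tio, sizeof(*fcp.arr));
        if (!fcp.arr)
                return 1;
        iotag_attach(&fcp);
        printf("head of ioim free list: tag %u\n", fcp.ioim_free->tag);  /* 3 */
        printf("head of tio  free list: tag %u\n", fcp.tio_free->tag);   /* 5 */
        free(fcp.arr);
        return 0;
}
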
index 1e38dade842341731e3a137c1ee9a02e79c9a1ec..1080bcb81cb73a2caf409d800da3b78deed8234f 100644 (file)
 #include "bfa_defs_svc.h"
 #include "bfa_cs.h"
 
+/* FCP module related definitions */
+#define BFA_IO_MAX     BFI_IO_MAX
+#define BFA_FWTIO_MAX  2000
+
+struct bfa_fcp_mod_s;
+struct bfa_iotag_s {
+       struct list_head        qe;     /* queue element        */
+       u16     tag;                    /* FW IO tag            */
+};
+
+struct bfa_itn_s {
+       bfa_isr_func_t isr;
+};
+
+void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+               void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
+void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
+void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
+void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);
+
+#define BFA_FCP_MOD(_hal)      (&(_hal)->modules.fcp_mod)
+#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg))
+#define BFA_IOTAG_FROM_TAG(_fcp, _tag) \
+       (&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)])
+#define BFA_ITN_FROM_TAG(_fcp, _tag)   \
+       ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
+#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
+       bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN)
 
 #define BFA_ITNIM_MIN   32
 #define BFA_ITNIM_MAX   1024
@@ -51,14 +79,22 @@ bfa_ioim_get_index(u32 n) {
        if (n >= (1UL)<<22)
                return BFA_IOBUCKET_MAX - 1;
        n >>= 8;
-       if (n >= (1UL)<<16)
-               n >>= 16; pos += 16;
-       if (n >= 1 << 8)
-               n >>= 8; pos += 8;
-       if (n >= 1 << 4)
-               n >>= 4; pos += 4;
-       if (n >= 1 << 2)
-               n >>= 2; pos += 2;
+       if (n >= (1UL)<<16) {
+               n >>= 16;
+               pos += 16;
+       }
+       if (n >= 1 << 8) {
+               n >>= 8;
+               pos += 8;
+       }
+       if (n >= 1 << 4) {
+               n >>= 4;
+               pos += 4;
+       }
+       if (n >= 1 << 2) {
+               n >>= 2;
+               pos += 2;
+       }
        if (n >= 1 << 1)
                pos += 1;
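
The braces added above restore the intended pairing of each shift with its position update in bfa_ioim_get_index(); in the old form every "pos +=" ran unconditionally. The routine is essentially floor(log2(n >> 8)), used to pick a latency histogram bucket. A standalone version for reference, with an assumed bucket count since BFA_IOBUCKET_MAX is defined elsewhere:

#include <stdio.h>

#define IOBUCKET_MAX 14   /* assumed; BFA_IOBUCKET_MAX is defined elsewhere */

static unsigned int io_get_index(unsigned long n)
{
        unsigned int pos = 0;

        if (n >= 1UL << 22)
                return IOBUCKET_MAX - 1;
        n >>= 8;                               /* drop sub-256 resolution */
        if (n >= 1UL << 16) { n >>= 16; pos += 16; }
        if (n >= 1 << 8)    { n >>= 8;  pos += 8;  }
        if (n >= 1 << 4)    { n >>= 4;  pos += 4;  }
        if (n >= 1 << 2)    { n >>= 2;  pos += 2;  }
        if (n >= 1 << 1)
                pos += 1;
        return pos;
}

int main(void)
{
        /* 1000 >> 8 = 3 -> floor(log2(3)) = 1;  100000 >> 8 = 390 -> 8 */
        printf("%u %u\n", io_get_index(1000), io_get_index(100000));
        return 0;
}
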
 
@@ -74,26 +110,26 @@ struct bfad_ioim_s;
 struct bfad_tskim_s;
 
 typedef void    (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
+typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
 
-struct bfa_fcpim_mod_s {
+struct bfa_fcpim_s {
        struct bfa_s            *bfa;
+       struct bfa_fcp_mod_s    *fcp;
        struct bfa_itnim_s      *itnim_arr;
        struct bfa_ioim_s       *ioim_arr;
        struct bfa_ioim_sp_s    *ioim_sp_arr;
        struct bfa_tskim_s      *tskim_arr;
-       struct bfa_dma_s        snsbase;
        int                     num_itnims;
-       int                     num_ioim_reqs;
        int                     num_tskim_reqs;
        u32                     path_tov;
        u16                     q_depth;
        u8                      reqq;           /*  Request queue to be used */
-       u8                      rsvd;
+       u8                      lun_masking_pending;
        struct list_head        itnim_q;        /*  queue of active itnim */
-       struct list_head        ioim_free_q;    /*  free IO resources   */
        struct list_head        ioim_resfree_q; /*  IOs waiting for f/w */
        struct list_head        ioim_comp_q;    /*  IO global comp Q    */
        struct list_head        tskim_free_q;
+       struct list_head        tskim_unused_q; /* Unused tskim Q */
        u32                     ios_active;     /*  current active IOs  */
        u32                     delay_comp;
        struct bfa_fcpim_del_itn_stats_s del_itn_stats;
@@ -104,6 +140,25 @@ struct bfa_fcpim_mod_s {
        bfa_fcpim_profile_t     profile_start;
 };
 
+/* Max FCP dma segs required */
+#define BFA_FCP_DMA_SEGS       BFI_IOIM_SNSBUF_SEGS
+
+struct bfa_fcp_mod_s {
+       struct bfa_s            *bfa;
+       struct list_head        iotag_ioim_free_q;      /* free IO resources */
+       struct list_head        iotag_tio_free_q;       /* free IO resources */
+       struct list_head        iotag_unused_q; /* unused IO resources*/
+       struct bfa_iotag_s      *iotag_arr;
+       struct bfa_itn_s        *itn_arr;
+       int                     num_ioim_reqs;
+       int                     num_fwtio_reqs;
+       int                     num_itns;
+       struct bfa_dma_s        snsbase[BFA_FCP_DMA_SEGS];
+       struct bfa_fcpim_s      fcpim;
+       struct bfa_mem_dma_s    dma_seg[BFA_FCP_DMA_SEGS];
+       struct bfa_mem_kva_s    kva_seg;
+};
+
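
The new bfa_fcp_mod_s embeds the renamed bfa_fcpim_s, so the BFA_FCPIM() accessor defined further down reaches it through the parent FCP module instead of a standalone fcpim_mod. A toy illustration of that containment with stripped-down types (the field choices here are arbitrary):

#include <stdio.h>

struct fcpim   { int num_tskim_reqs; };
struct fcp_mod { int num_ioim_reqs; struct fcpim fcpim; };
struct modules { struct fcp_mod fcp_mod; };
struct bfa     { struct modules modules; };

/* mirrors BFA_FCP_MOD()/BFA_FCPIM(): the child is reached via the parent */
#define FCP_MOD(b) (&(b)->modules.fcp_mod)
#define FCPIM(b)   (&FCP_MOD(b)->fcpim)

int main(void)
{
        struct bfa hal = { { { 10, { 4 } } } };

        printf("ioim=%d tskim=%d\n",
               FCP_MOD(&hal)->num_ioim_reqs, FCPIM(&hal)->num_tskim_reqs);
        return 0;
}
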
 /*
  * BFA IO (initiator mode)
  */
@@ -111,7 +166,7 @@ struct bfa_ioim_s {
        struct list_head        qe;             /*  queue elememt       */
        bfa_sm_t                sm;             /*  BFA ioim state machine */
        struct bfa_s            *bfa;           /*  BFA module  */
-       struct bfa_fcpim_mod_s  *fcpim;         /*  parent fcpim module */
+       struct bfa_fcpim_s      *fcpim;         /*  parent fcpim module */
        struct bfa_itnim_s      *itnim;         /*  i-t-n nexus for this IO  */
        struct bfad_ioim_s      *dio;           /*  driver IO handle    */
        u16                     iotag;          /*  FWI IO tag  */
@@ -124,12 +179,13 @@ struct bfa_ioim_s {
        bfa_cb_cbfn_t           io_cbfn;        /*  IO completion handler */
        struct bfa_ioim_sp_s    *iosp;          /*  slow-path IO handling */
        u8                      reqq;           /*  Request queue for I/O */
+       u8                      mode;           /*  IO is passthrough or not */
        u64                     start_time;     /*  IO's Profile start val */
+       bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
 };
 
 struct bfa_ioim_sp_s {
        struct bfi_msg_s        comp_rspmsg;    /*  IO comp f/w response */
-       u8                      *snsinfo;       /*  sense info for this IO   */
        struct bfa_sgpg_wqe_s   sgpg_wqe;       /*  waitq elem for sgpg */
        struct bfa_reqq_wait_s  reqq_wait;      /*  to wait for room in reqq */
        bfa_boolean_t           abort_explicit; /*  aborted by OS       */
@@ -143,7 +199,7 @@ struct bfa_tskim_s {
        struct list_head        qe;
        bfa_sm_t                sm;
        struct bfa_s            *bfa;   /*  BFA module  */
-       struct bfa_fcpim_mod_s  *fcpim; /*  parent fcpim module */
+       struct bfa_fcpim_s      *fcpim; /*  parent fcpim module */
        struct bfa_itnim_s      *itnim; /*  i-t-n nexus for this IO  */
        struct bfad_tskim_s     *dtsk;  /*  driver task mgmt cmnd       */
        bfa_boolean_t           notify; /*  notify itnim on TM comp  */
@@ -182,13 +238,13 @@ struct bfa_itnim_s {
        struct bfa_wc_s wc;             /*  waiting counter     */
        struct bfa_timer_s timer;       /*  pending IO TOV       */
        struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
-       struct bfa_fcpim_mod_s *fcpim;  /*  fcpim module        */
+       struct bfa_fcpim_s *fcpim;      /*  fcpim module        */
        struct bfa_itnim_iostats_s      stats;
        struct bfa_itnim_ioprofile_s  ioprofile;
 };
 
 #define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
-#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
+#define BFA_FCPIM(_hal)        (&(_hal)->modules.fcp_mod.fcpim)
 #define BFA_IOIM_TAG_2_ID(_iotag)      ((_iotag) & BFA_IOIM_IOTAG_MASK)
 #define BFA_IOIM_FROM_TAG(_fcpim, _iotag)      \
        (&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)])
@@ -196,15 +252,19 @@ struct bfa_itnim_s {
        (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
 
 #define bfa_io_profile_start_time(_bfa)        \
-       (_bfa->modules.fcpim_mod.io_profile_start_time)
+       ((_bfa)->modules.fcp_mod.fcpim.io_profile_start_time)
 #define bfa_fcpim_get_io_profile(_bfa) \
-       (_bfa->modules.fcpim_mod.io_profile)
+       ((_bfa)->modules.fcp_mod.fcpim.io_profile)
 #define bfa_ioim_update_iotag(__ioim) do {                             \
        uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;      \
        k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK;                    \
        (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;              \
 } while (0)
 
+#define BFA_IOIM_TO_LPS(__ioim)                \
+       BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa),      \
+               __ioim->itnim->rport->rport_info.lp_tag)
+
 static inline bfa_boolean_t
 bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
@@ -217,8 +277,7 @@ bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 /*
  * function prototypes
  */
-void   bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
-                                       struct bfa_meminfo_s *minfo);
+void   bfa_ioim_attach(struct bfa_fcpim_s *fcpim);
 void   bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void   bfa_ioim_good_comp_isr(struct bfa_s *bfa,
                                        struct bfi_msg_s *msg);
@@ -228,18 +287,15 @@ void      bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
 void   bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
 void   bfa_ioim_tov(struct bfa_ioim_s *ioim);
 
-void   bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
-                                       struct bfa_meminfo_s *minfo);
+void   bfa_tskim_attach(struct bfa_fcpim_s *fcpim);
 void   bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void   bfa_tskim_iodone(struct bfa_tskim_s *tskim);
 void   bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
 void   bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
+void   bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw);
 
-void   bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-                                       u32 *dm_len);
-void   bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
-                                       struct bfa_meminfo_s *minfo);
-void   bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
+void   bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len);
+void   bfa_itnim_attach(struct bfa_fcpim_s *fcpim);
 void   bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
 void   bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void   bfa_itnim_iodone(struct bfa_itnim_s *itnim);
@@ -252,13 +308,19 @@ bfa_boolean_t   bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
 void   bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
 u16    bfa_fcpim_path_tov_get(struct bfa_s *bfa);
 u16    bfa_fcpim_qdepth_get(struct bfa_s *bfa);
+bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
+                       struct bfa_itnim_iostats_s *stats, u8 lp_tag);
+void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
+                       struct bfa_itnim_iostats_s *itnim_stats);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
 
 #define bfa_fcpim_ioredirect_enabled(__bfa)                            \
-       (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
+       (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
 
 #define bfa_fcpim_get_next_reqq(__bfa, __qid)                          \
 {                                                                      \
-       struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa);      \
+       struct bfa_fcpim_s *__fcpim = BFA_FCPIM(__bfa);      \
        __fcpim->reqq++;                                                \
        __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1);      \
        *(__qid) = __fcpim->reqq;                                       \
@@ -352,4 +414,14 @@ void bfa_tskim_start(struct bfa_tskim_s *tskim,
 void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
                        enum bfi_tskim_status tsk_status);
 
+void   bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
+                       wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
+bfa_status_t   bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
+bfa_status_t   bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
+bfa_status_t   bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
+                               wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t   bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
+                               wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t   bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
+
 #endif /* __BFA_FCPIM_H__ */
index 9b43ca4b67788cd133880b2c647d75bb8859a877..eaac57e1ddec4fd42a0ef08f47494c9310a3bc14 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 
@@ -92,25 +93,49 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
 void
 bfa_fcs_init(struct bfa_fcs_s *fcs)
 {
-       int             i, npbc_vports;
+       int     i;
        struct bfa_fcs_mod_s  *mod;
-       struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
 
        for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
                mod = &fcs_modules[i];
                if (mod->modinit)
                        mod->modinit(fcs);
        }
+}
+
+/*
+ * FCS update cfg - reset the pwwn/nwwn of the fabric base logical port
+ * with the values learned from the firmware GETATTR request during bfa_init.
+ */
+void
+bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
+{
+       struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
+       struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+       struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc;
+
+       port_cfg->nwwn = ioc->attr->nwwn;
+       port_cfg->pwwn = ioc->attr->pwwn;
+}
+
+/*
+ * FCS pre-boot configuration (PBC) vport initialization
+ */
+void
+bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs)
+{
+       int i, npbc_vports;
+       struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
+
        /* Initialize pbc vports */
        if (!fcs->min_cfg) {
                npbc_vports =
-                   bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
+                       bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
                for (i = 0; i < npbc_vports; i++)
                        bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
        }
 }
 
-
 /*
  *     brief
  *             FCS driver details initialization.
@@ -168,11 +193,14 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
 #define BFA_FCS_FABRIC_CLEANUP_DELAY   (10000) /* Milliseconds */
 
 #define bfa_fcs_fabric_set_opertype(__fabric) do {                     \
-               if (bfa_fcport_get_topology((__fabric)->fcs->bfa)       \
-                   == BFA_PORT_TOPOLOGY_P2P)                           \
+       if (bfa_fcport_get_topology((__fabric)->fcs->bfa)               \
+                               == BFA_PORT_TOPOLOGY_P2P) {             \
+               if (fabric->fab_type == BFA_FCS_FABRIC_SWITCHED)        \
                        (__fabric)->oper_type = BFA_PORT_TYPE_NPORT;    \
                else                                                    \
-                       (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT;   \
+                       (__fabric)->oper_type = BFA_PORT_TYPE_P2P;      \
+       } else                                                          \
+               (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT;           \
 } while (0)
 
 /*
@@ -196,6 +224,9 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
                                         u32 rsp_len,
                                         u32 resid_len,
                                         struct fchs_s *rspfchs);
+static u8 bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric);
+static bfa_boolean_t bfa_fcs_fabric_is_bbscn_enabled(
+                               struct bfa_fcs_fabric_s *fabric);
 
 static void    bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
                                         enum bfa_fcs_fabric_event event);
@@ -269,8 +300,8 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
                break;
 
        case BFA_FCS_FABRIC_SM_DELETE:
-               bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-               bfa_wc_down(&fabric->fcs->wc);
+               bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+               bfa_fcs_fabric_delete(fabric);
                break;
 
        default:
@@ -322,7 +353,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
        case BFA_FCS_FABRIC_SM_CONT_OP:
 
                bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
-                                          fabric->bb_credit);
+                                          fabric->bb_credit,
+                                          bfa_fcs_fabric_oper_bbscn(fabric));
                fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
 
                if (fabric->auth_reqd && fabric->is_auth) {
@@ -350,7 +382,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
        case BFA_FCS_FABRIC_SM_NO_FABRIC:
                fabric->fab_type = BFA_FCS_FABRIC_N2N;
                bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
-                                          fabric->bb_credit);
+                                          fabric->bb_credit,
+                                          bfa_fcs_fabric_oper_bbscn(fabric));
                bfa_fcs_fabric_notify_online(fabric);
                bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
                break;
@@ -518,7 +551,11 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
        case BFA_FCS_FABRIC_SM_NO_FABRIC:
                bfa_trc(fabric->fcs, fabric->bb_credit);
                bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
-                                          fabric->bb_credit);
+                                          fabric->bb_credit,
+                                          bfa_fcs_fabric_oper_bbscn(fabric));
+               break;
+
+       case BFA_FCS_FABRIC_SM_RETRY_OP:
                break;
 
        default:
@@ -764,6 +801,10 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
 
        case BFA_STATUS_FABRIC_RJT:
                fabric->stats.flogi_rejects++;
+               if (fabric->lps->lsrjt_rsn == FC_LS_RJT_RSN_LOGICAL_ERROR &&
+                   fabric->lps->lsrjt_expl == FC_LS_RJT_EXP_NO_ADDL_INFO)
+                       fabric->fcs->bbscn_flogi_rjt = BFA_TRUE;
+
                bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
                return;
 
@@ -793,6 +834,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
                 */
                fabric->bport.port_topo.pn2n.rem_port_wwn =
                        fabric->lps->pr_pwwn;
+               fabric->fab_type = BFA_FCS_FABRIC_N2N;
                bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
        }
 
@@ -808,13 +850,17 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
 {
        struct bfa_s            *bfa = fabric->fcs->bfa;
        struct bfa_lport_cfg_s  *pcfg = &fabric->bport.port_cfg;
-       u8                      alpa = 0;
+       u8                      alpa = 0, bb_scn = 0;
 
        if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
                alpa = bfa_fcport_get_myalpa(bfa);
 
+       if (bfa_fcs_fabric_is_bbscn_enabled(fabric) &&
+           (!fabric->fcs->bbscn_flogi_rjt))
+               bb_scn = BFA_FCS_PORT_DEF_BB_SCN;
+
        bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
-                     pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
+                     pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd, bb_scn);
 
        fabric->stats.flogi_sent++;
 }
@@ -872,6 +918,40 @@ bfa_fcs_fabric_delay(void *cbarg)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
 }
 
+/*
+ * Computes operating BB_SCN value
+ */
+static u8
+bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric)
+{
+       u8      pr_bbscn = fabric->lps->pr_bbscn;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
+
+       if (!(fcport->cfg.bb_scn_state && pr_bbscn))
+               return 0;
+
+       /* return max of local/remote bb_scn values */
+       return ((pr_bbscn > BFA_FCS_PORT_DEF_BB_SCN) ?
+               pr_bbscn : BFA_FCS_PORT_DEF_BB_SCN);
+}
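
bfa_fcs_fabric_oper_bbscn() above picks the operating BB_SCN as the larger of the local default (BFA_FCS_PORT_DEF_BB_SCN, added as 3 in the header change later in this patch) and the value the peer advertised, or 0 when either side has the feature off. A worked example; the enable flag and peer values below are illustrative:

#include <stdio.h>

#define DEF_BB_SCN 3

static unsigned char oper_bbscn(int local_enabled, unsigned char pr_bbscn)
{
        if (!(local_enabled && pr_bbscn))
                return 0;                  /* feature off on either side */
        /* take the larger of the local default and the peer-advertised value */
        return pr_bbscn > DEF_BB_SCN ? pr_bbscn : DEF_BB_SCN;
}

int main(void)
{
        printf("%u\n", oper_bbscn(1, 2));   /* 3: local default wins */
        printf("%u\n", oper_bbscn(1, 5));   /* 5: peer value wins    */
        printf("%u\n", oper_bbscn(0, 5));   /* 0: disabled locally   */
        return 0;
}
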
+
+/*
+ * Check if BB_SCN can be enabled.
+ */
+static bfa_boolean_t
+bfa_fcs_fabric_is_bbscn_enabled(struct bfa_fcs_fabric_s *fabric)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
+
+       if (bfa_ioc_get_fcmode(&fabric->fcs->bfa->ioc) &&
+                       fcport->cfg.bb_scn_state &&
+                       !bfa_fcport_is_qos_enabled(fabric->fcs->bfa) &&
+                       !bfa_fcport_is_trunk_enabled(fabric->fcs->bfa))
+               return BFA_TRUE;
+       else
+               return BFA_FALSE;
+}
+
 /*
  * Delete all vports and wait for vport delete completions.
  */
@@ -989,6 +1069,7 @@ void
 bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
 {
        bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+       fabric->fcs->bbscn_flogi_rjt = BFA_FALSE;
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
 }
 
@@ -1192,6 +1273,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
        }
 
        fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
+       fabric->lps->pr_bbscn = (be16_to_cpu(flogi->csp.rxsz) >> 12);
        bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
        bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
 
@@ -1224,9 +1306,10 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
                                    n2n_port->reply_oxid, pcfg->pwwn,
                                    pcfg->nwwn,
                                    bfa_fcport_get_maxfrsize(bfa),
-                                   bfa_fcport_get_rx_bbcredit(bfa));
+                                   bfa_fcport_get_rx_bbcredit(bfa),
+                                   bfa_fcs_fabric_oper_bbscn(fabric));
 
-       bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag,
+       bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag,
                      BFA_FALSE, FC_CLASS_3,
                      reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
                      FC_MAX_PDUSZ, 0);
@@ -1245,6 +1328,29 @@ bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
        bfa_trc(fabric->fcs, status);
 }
 
+
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
+                       enum bfa_port_aen_event event)
+{
+       struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+       struct bfa_aen_entry_s  *aen_entry;
+
+       bfad_get_aen_entry(bfad, aen_entry);
+       if (!aen_entry)
+               return;
+
+       aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
+       aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
+
+       /* Send the AEN notification */
+       bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+                                 BFA_AEN_CAT_PORT, event);
+}
+
 /*
  *
  * @param[in] fabric - fabric
@@ -1276,6 +1382,8 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
                BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
                        "Base port WWN = %s Fabric WWN = %s\n",
                        pwwn_ptr, fwwn_ptr);
+               bfa_fcs_fabric_aen_post(&fabric->bport,
+                               BFA_PORT_AEN_FABRIC_NAME_CHANGE);
        }
 }
 
@@ -1297,6 +1405,45 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
        return NULL;
 }
 
+/*
+ *     Return the list of local logical ports present in the given VF.
+ *
+ *     @param[in]      vf      vf for which logical ports are returned
+ *     @param[out]     lpwwn   returned logical port wwn list
+ *     @param[in,out]  nlports in: size of the lpwwn list;
+ *                             out: number of elements actually returned,
+ *                             limited by the size passed in
+ */
+void
+bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
+{
+       struct list_head *qe;
+       struct bfa_fcs_vport_s *vport;
+       int     i = 0;
+       struct bfa_fcs_s        *fcs;
+
+       if (vf == NULL || lpwwn == NULL || *nlports == 0)
+               return;
+
+       fcs = vf->fcs;
+
+       bfa_trc(fcs, vf->vf_id);
+       bfa_trc(fcs, (uint32_t) *nlports);
+
+       lpwwn[i++] = vf->bport.port_cfg.pwwn;
+
+       list_for_each(qe, &vf->vport_q) {
+               if (i >= *nlports)
+                       break;
+
+               vport = (struct bfa_fcs_vport_s *) qe;
+               lpwwn[i++] = vport->lport.port_cfg.pwwn;
+       }
+
+       bfa_trc(fcs, i);
+       *nlports = i;
+}
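
bfa_fcs_vf_get_ports() above uses the in/out count convention spelled out in its comment block: the caller supplies the capacity in *nlports and gets back how many WWNs were actually written. A small sketch of that calling convention with simplified types (wwn_t reduced to a plain integer):

#include <stdio.h>

typedef unsigned long long wwn_t;

static void get_ports(const wwn_t *all, int navail, wwn_t out[], int *ncount)
{
        int i;

        if (!all || !out || !ncount || *ncount == 0)
                return;

        for (i = 0; i < navail && i < *ncount; i++)
                out[i] = all[i];
        *ncount = i;                    /* report how many were returned */
}

int main(void)
{
        wwn_t all[3] = { 0x10ULL, 0x20ULL, 0x30ULL };
        wwn_t buf[2];
        int n = 2;                      /* in: capacity of buf */

        get_ports(all, 3, buf, &n);
        printf("returned %d ports, first 0x%llx\n", n, buf[0]);
        return 0;
}
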
+
 /*
  * BFA FCS PPORT ( physical port)
  */
index 61cdce4bd913031342baddcdf4d65ea2cbcabb99..e75e07d25915250b7ec16d6d1087b220c882c85d 100644 (file)
@@ -254,6 +254,9 @@ struct bfa_fcs_fabric_s;
 #define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ                        48
 #define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ               16
 
+/* default bb_scn value, expressed as the exponent in 2^bb_scn */
+#define BFA_FCS_PORT_DEF_BB_SCN                                3
+
 /*
  * Get FC port ID for a logical port.
  */
@@ -379,6 +382,7 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
 
 #define BFA_FCS_RPORT_DEF_DEL_TIMEOUT  90      /* in secs */
 #define BFA_FCS_RPORT_MAX_RETRIES      (5)
@@ -420,6 +424,7 @@ struct bfa_fcs_rport_s {
        enum fc_cos     fc_cos; /*  FC classes of service supp */
        bfa_boolean_t   cisc;   /*  CISC capable device */
        bfa_boolean_t   prlo;   /*  processing prlo or LOGO */
+       bfa_boolean_t   plogi_pending;  /* Rx Plogi Pending */
        wwn_t   pwwn;   /*  port wwn of rport */
        wwn_t   nwwn;   /*  node wwn of rport */
        struct bfa_rport_symname_s psym_name; /*  port symbolic name  */
@@ -447,6 +452,8 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
 /*
  * bfa fcs rport API functions
  */
+void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+                       struct bfa_rport_attr_s *attr);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
                                             wwn_t rpwwn);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
@@ -591,10 +598,21 @@ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
 void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
                        struct fchs_s *fchs, u16 len);
 
-#define        BFA_FCS_FDMI_SUPORTED_SPEEDS  (FDMI_TRANS_SPEED_1G  |   \
-                                      FDMI_TRANS_SPEED_2G |    \
-                                      FDMI_TRANS_SPEED_4G |    \
-                                      FDMI_TRANS_SPEED_8G)
+#define BFA_FCS_FDMI_SUPP_SPEEDS_4G    (FDMI_TRANS_SPEED_1G  | \
+                               FDMI_TRANS_SPEED_2G |           \
+                               FDMI_TRANS_SPEED_4G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_8G    (FDMI_TRANS_SPEED_1G  | \
+                               FDMI_TRANS_SPEED_2G |           \
+                               FDMI_TRANS_SPEED_4G |           \
+                               FDMI_TRANS_SPEED_8G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_16G   (FDMI_TRANS_SPEED_2G  | \
+                               FDMI_TRANS_SPEED_4G |           \
+                               FDMI_TRANS_SPEED_8G |           \
+                               FDMI_TRANS_SPEED_16G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_10G   FDMI_TRANS_SPEED_10G
 
 /*
  * HBA Attribute Block : BFA internal representation. Note : Some variable
@@ -649,12 +667,15 @@ struct bfa_fcs_s {
        struct bfa_trc_mod_s  *trcmod;  /*  tracing module */
        bfa_boolean_t   vf_enabled;     /*  VF mode is enabled */
        bfa_boolean_t   fdmi_enabled;   /*  FDMI is enabled */
+       bfa_boolean_t   bbscn_enabled;  /*  Driver Config Parameter */
+       bfa_boolean_t   bbscn_flogi_rjt;/*  FLOGI reject due to BB_SCN */
        bfa_boolean_t min_cfg;          /* min cfg enabled/disabled */
        u16     port_vfid;      /*  port default VF ID */
        struct bfa_fcs_driver_info_s driver_info;
        struct bfa_fcs_fabric_s fabric; /*  base fabric state machine */
        struct bfa_fcs_stats_s  stats;  /*  FCS statistics */
        struct bfa_wc_s         wc;     /*  waiting counter */
+       int                     fcs_aen_seq;
 };
 
 /*
@@ -715,6 +736,8 @@ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
                    struct bfad_s *bfad,
                    bfa_boolean_t min_cfg);
 void bfa_fcs_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
 void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
                              struct bfa_fcs_driver_info_s *driver_info);
 void bfa_fcs_exit(struct bfa_fcs_s *fcs);
@@ -723,6 +746,7 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs);
  * bfa fcs vf public functions
  */
 bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
+void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
 
 /*
  * fabric protected interface functions
index e7b49f4cb51f1a5affd4ec8ad94b92755c1f5c95..9272840a2409fe6075c26604d9d62bc135e0774c 100644 (file)
@@ -37,6 +37,8 @@ static void   bfa_fcs_itnim_prli_response(void *fcsarg,
                         struct bfa_fcxp_s *fcxp, void *cbarg,
                            bfa_status_t req_status, u32 rsp_len,
                            u32 resid_len, struct fchs_s *rsp_fchs);
+static void    bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+                       enum bfa_itnim_aen_event event);
 
 /*
  *  fcs_itnim_sm FCS itnim state machine events
@@ -54,6 +56,7 @@ enum bfa_fcs_itnim_event {
        BFA_FCS_ITNIM_SM_INITIATOR = 9, /*  rport is initiator */
        BFA_FCS_ITNIM_SM_DELETE = 10,   /*  delete event from rport */
        BFA_FCS_ITNIM_SM_PRLO = 11,     /*  delete event from rport */
+       BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
 };
 
 static void    bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
@@ -178,6 +181,10 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
                                BFA_FCS_RETRY_TIMEOUT);
                break;
 
+       case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP:
+               bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+               break;
+
        case BFA_FCS_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
                bfa_fcxp_discard(itnim->fcxp);
@@ -264,6 +271,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "Target (WWN = %s) is online for initiator (WWN = %s)\n",
                rpwwn_buf, lpwwn_buf);
+               bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
                break;
 
        case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -300,14 +308,17 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
                bfa_itnim_offline(itnim->bfa_itnim);
                wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
                wwn2str(rpwwn_buf, itnim->rport->pwwn);
-               if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
+               if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                        "Target (WWN = %s) connectivity lost for "
                        "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
-               else
+                       bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
+               } else {
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Target (WWN = %s) offlined by initiator (WWN = %s)\n",
                        rpwwn_buf, lpwwn_buf);
+                       bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
+               }
                break;
 
        case BFA_FCS_ITNIM_SM_DELETE:
@@ -376,6 +387,33 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
        }
 }
 
+static void
+bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+                       enum bfa_itnim_aen_event event)
+{
+       struct bfa_fcs_rport_s *rport = itnim->rport;
+       struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+       struct bfa_aen_entry_s  *aen_entry;
+
+       /* Don't post events for well known addresses */
+       if (BFA_FCS_PID_IS_WKA(rport->pid))
+               return;
+
+       bfad_get_aen_entry(bfad, aen_entry);
+       if (!aen_entry)
+               return;
+
+       aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
+       aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
+                                       bfa_fcs_get_base_port(itnim->fcs));
+       aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+       aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
+
+       /* Send the AEN notification */
+       bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+                                 BFA_AEN_CAT_ITNIM, event);
+}
+
 static void
 bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 {
@@ -447,6 +485,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
                                itnim->rport->scsi_function =
                                         BFA_RPORT_INITIATOR;
                                itnim->stats.prli_rsp_acc++;
+                               itnim->stats.initiator++;
                                bfa_sm_send_event(itnim,
                                                  BFA_FCS_ITNIM_SM_RSP_OK);
                                return;
@@ -472,6 +511,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
                bfa_trc(itnim->fcs, ls_rjt->reason_code_expl);
 
                itnim->stats.prli_rsp_rjt++;
+               if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) {
+                       bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP);
+                       return;
+               }
                bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
        }
 }
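
The itnim changes above rely on the driver's generic state-machine helpers, which are defined in bfa_cs.h rather than in this patch. As a reading aid, the sketch below shows their assumed shape (an assumption about the driver's convention, not a quote of the header): states are plain functions, and events are dispatched through a function pointer held in the object.

    /*
     * Assumed shape of the state-machine helpers used throughout
     * (defined in bfa_cs.h, not modified by this patch).
     */
    typedef void (*bfa_sm_t)(void *sm, int event);

    /* install a new state function */
    #define bfa_sm_set_state(_sm, _state)   ((_sm)->sm = (bfa_sm_t)(_state))
    /* dispatch an event to the current state function */
    #define bfa_sm_send_event(_sm, _event)  ((_sm)->sm((_sm), (_event)))

Under that reading, bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP) simply invokes whichever state function (for example bfa_fcs_itnim_sm_prli) is currently installed on the itnim.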
index 1d6be8c144734a085bb7c90f7a5b5703f876557f..d4f951fe753eecb1fbccade0e96e5adde904b986 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 #include "bfa_fc.h"
@@ -74,6 +75,7 @@ enum bfa_fcs_lport_event {
        BFA_FCS_PORT_SM_OFFLINE = 3,
        BFA_FCS_PORT_SM_DELETE = 4,
        BFA_FCS_PORT_SM_DELRPORT = 5,
+       BFA_FCS_PORT_SM_STOP = 6,
 };
 
 static void     bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
@@ -86,6 +88,8 @@ static void     bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port,
                                        enum bfa_fcs_lport_event event);
 static void     bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port,
                                        enum bfa_fcs_lport_event event);
+static void    bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+                                       enum bfa_fcs_lport_event event);
 
 static void
 bfa_fcs_lport_sm_uninit(
@@ -123,6 +127,12 @@ bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
                bfa_fcs_lport_deleted(port);
                break;
 
+       case BFA_FCS_PORT_SM_STOP:
+               /* If vport - send completion call back */
+               if (port->vport)
+                       bfa_fcs_vport_stop_comp(port->vport);
+               break;
+
        case BFA_FCS_PORT_SM_OFFLINE:
                break;
 
@@ -148,6 +158,23 @@ bfa_fcs_lport_sm_online(
                bfa_fcs_lport_offline_actions(port);
                break;
 
+       case BFA_FCS_PORT_SM_STOP:
+               __port_action[port->fabric->fab_type].offline(port);
+
+               if (port->num_rports == 0) {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+                       /* If vport - send completion call back */
+                       if (port->vport)
+                               bfa_fcs_vport_stop_comp(port->vport);
+               } else {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+                       list_for_each_safe(qe, qen, &port->rport_q) {
+                               rport = (struct bfa_fcs_rport_s *) qe;
+                               bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+                       }
+               }
+               break;
+
        case BFA_FCS_PORT_SM_DELETE:
 
                __port_action[port->fabric->fab_type].offline(port);
@@ -189,6 +216,21 @@ bfa_fcs_lport_sm_offline(
                bfa_fcs_lport_online_actions(port);
                break;
 
+       case BFA_FCS_PORT_SM_STOP:
+               if (port->num_rports == 0) {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+                       /* If vport - send completion call back */
+                       if (port->vport)
+                               bfa_fcs_vport_stop_comp(port->vport);
+               } else {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+                       list_for_each_safe(qe, qen, &port->rport_q) {
+                               rport = (struct bfa_fcs_rport_s *) qe;
+                               bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+                       }
+               }
+               break;
+
        case BFA_FCS_PORT_SM_DELETE:
                if (port->num_rports == 0) {
                        bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
@@ -211,6 +253,28 @@ bfa_fcs_lport_sm_offline(
        }
 }
 
+static void
+bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+                         enum bfa_fcs_lport_event event)
+{
+       bfa_trc(port->fcs, port->port_cfg.pwwn);
+       bfa_trc(port->fcs, event);
+
+       switch (event) {
+       case BFA_FCS_PORT_SM_DELRPORT:
+               if (port->num_rports == 0) {
+                       bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+                       /* If vport - send completion call back */
+                       if (port->vport)
+                               bfa_fcs_vport_stop_comp(port->vport);
+               }
+               break;
+
+       default:
+               bfa_sm_fault(port->fcs, event);
+       }
+}
+
 static void
 bfa_fcs_lport_sm_deleting(
        struct bfa_fcs_lport_s *port,
@@ -236,6 +300,31 @@ bfa_fcs_lport_sm_deleting(
  *  fcs_port_pvt
  */
 
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
+                       enum bfa_lport_aen_event event)
+{
+       struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+       struct bfa_aen_entry_s  *aen_entry;
+
+       bfad_get_aen_entry(bfad, aen_entry);
+       if (!aen_entry)
+               return;
+
+       aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+       aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+       aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+                                       bfa_fcs_get_base_port(port->fcs));
+       aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+       /* Send the AEN notification */
+       bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+                                 BFA_AEN_CAT_LPORT, event);
+}
+
 /*
  * Send a LS reject
  */
@@ -264,6 +353,40 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
                          FC_MAX_PDUSZ, 0);
 }
 
+/*
+ * Send an FC-CT Reject
+ */
+static void
+bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
+       struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl)
+{
+       struct fchs_s   fchs;
+       struct bfa_fcxp_s *fcxp;
+       struct bfa_rport_s *bfa_rport = NULL;
+       int             len;
+       struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1);
+       struct ct_hdr_s *ct_hdr;
+
+       bfa_trc(port->fcs, rx_fchs->d_id);
+       bfa_trc(port->fcs, rx_fchs->s_id);
+
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       if (!fcxp)
+               return;
+
+       ct_hdr = bfa_fcxp_get_reqbuf(fcxp);
+       ct_hdr->gs_type = rx_cthdr->gs_type;
+       ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type;
+
+       len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id,
+                       bfa_fcs_lport_get_fcid(port),
+                       rx_fchs->ox_id, reason_code, reason_code_expl);
+
+       bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+                       BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+                       FC_MAX_PDUSZ, 0);
+}
+
 /*
  * Process incoming plogi from a remote port.
  */
@@ -496,6 +619,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "Logical port online: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
+       bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
 
        bfad->bfad_flags |= BFAD_PORT_ONLINE;
 }
@@ -514,14 +638,17 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
 
        wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
        if (bfa_sm_cmp_state(port->fabric,
-                       bfa_fcs_fabric_sm_online) == BFA_TRUE)
+                       bfa_fcs_fabric_sm_online) == BFA_TRUE) {
                BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                "Logical port lost fabric connectivity: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
-       else
+               bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
+       } else {
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "Logical port taken offline: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
+               bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
+       }
 
        list_for_each_safe(qe, qen, &port->rport_q) {
                rport = (struct bfa_fcs_rport_s *) qe;
@@ -579,6 +706,7 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "Logical port deleted: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
+       bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
 
        /* Base port will be deleted by the OS driver */
        if (port->vport) {
@@ -647,6 +775,16 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
                        bfa_fcs_lport_abts_acc(lport, fchs);
                return;
        }
+
+       if (fchs->type == FC_TYPE_SERVICES) {
+               /*
+                * Unhandled FC-GS frames. Send an FC-CT Reject.
+                */
+               bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP,
+                               CT_NS_EXP_NOADDITIONAL);
+               return;
+       }
+
        /*
         * look for a matching remote port ID
         */
@@ -835,8 +973,8 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
        lport->fcs = fcs;
        lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
        lport->vport = vport;
-       lport->lp_tag = (vport) ? vport->lps->lp_tag :
-                                 lport->fabric->lps->lp_tag;
+       lport->lp_tag = (vport) ? vport->lps->bfa_tag :
+                                 lport->fabric->lps->bfa_tag;
 
        INIT_LIST_HEAD(&lport->rport_q);
        lport->num_rports = 0;
@@ -866,6 +1004,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "New logical port created: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
+       bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
 
        bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
        bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
@@ -1074,6 +1213,8 @@ static void       bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
                                 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
 static void    bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
                                  struct bfa_fcs_fdmi_port_attr_s *port_attr);
+u32    bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed);
+
 /*
  *  fcs_fdmi_sm FCS FDMI state machine
  */
@@ -1672,7 +1813,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
        memcpy(attr->value, fcs_hba_attr->driver_version, templen);
        templen = fc_roundup(templen, sizeof(u32));
        curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
-       len += templen;;
+       len += templen;
        count++;
        attr->len = cpu_to_be16(templen + sizeof(attr->type) +
                             sizeof(templen));
@@ -2160,12 +2301,36 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
        /*
         * Supported Speeds
         */
-       port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS);
+       switch (pport_attr.speed_supported) {
+       case BFA_PORT_SPEED_16GBPS:
+               port_attr->supp_speed =
+                       cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G);
+               break;
+
+       case BFA_PORT_SPEED_10GBPS:
+               port_attr->supp_speed =
+                       cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G);
+               break;
+
+       case BFA_PORT_SPEED_8GBPS:
+               port_attr->supp_speed =
+                       cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G);
+               break;
+
+       case BFA_PORT_SPEED_4GBPS:
+               port_attr->supp_speed =
+                       cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G);
+               break;
+
+       default:
+               bfa_sm_fault(port->fcs, pport_attr.speed_supported);
+       }
 
        /*
         * Current Speed
         */
-       port_attr->curr_speed = cpu_to_be32(pport_attr.speed);
+       port_attr->curr_speed = cpu_to_be32(
+                               bfa_fcs_fdmi_convert_speed(pport_attr.speed));
 
        /*
         * Max PDU Size.
@@ -2186,6 +2351,41 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 
 }
 
+/*
+ * Convert BFA speed to FDMI format.
+ */
+u32
+bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed)
+{
+       u32     ret;
+
+       switch (pport_speed) {
+       case BFA_PORT_SPEED_1GBPS:
+       case BFA_PORT_SPEED_2GBPS:
+               ret = pport_speed;
+               break;
+
+       case BFA_PORT_SPEED_4GBPS:
+               ret = FDMI_TRANS_SPEED_4G;
+               break;
+
+       case BFA_PORT_SPEED_8GBPS:
+               ret = FDMI_TRANS_SPEED_8G;
+               break;
+
+       case BFA_PORT_SPEED_10GBPS:
+               ret = FDMI_TRANS_SPEED_10G;
+               break;
+
+       case BFA_PORT_SPEED_16GBPS:
+               ret = FDMI_TRANS_SPEED_16G;
+               break;
+
+       default:
+               ret = FDMI_TRANS_SPEED_UNKNOWN;
+       }
+       return ret;
+}
 
 void
 bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms)
@@ -2829,7 +3029,8 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
                             bfa_hton3b(FC_MGMT_SERVER),
                             bfa_fcs_lport_get_fcid(port), 0,
                             port->port_cfg.pwwn, port->port_cfg.nwwn,
-                                bfa_fcport_get_maxfrsize(port->fcs->bfa));
+                            bfa_fcport_get_maxfrsize(port->fcs->bfa),
+                            bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                          FC_CLASS_3, len, &fchs,
@@ -3573,7 +3774,7 @@ bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->pid);
 
-fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
        if (!fcxp) {
                port->stats.ns_plogi_alloc_wait++;
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
@@ -3586,7 +3787,8 @@ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
                             bfa_hton3b(FC_NAME_SERVER),
                             bfa_fcs_lport_get_fcid(port), 0,
                             port->port_cfg.pwwn, port->port_cfg.nwwn,
-                                bfa_fcport_get_maxfrsize(port->fcs->bfa));
+                            bfa_fcport_get_maxfrsize(port->fcs->bfa),
+                            bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                          FC_CLASS_3, len, &fchs,
@@ -4762,8 +4964,8 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
        while (qe != qh) {
                rport = (struct bfa_fcs_rport_s *) qe;
                if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
-                       (bfa_fcs_rport_get_state(rport) ==
-                         BFA_RPORT_OFFLINE)) {
+                       (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) ||
+                       (rport->scsi_function != BFA_RPORT_TARGET)) {
                        qe = bfa_q_next(qe);
                        continue;
                }
@@ -4776,17 +4978,15 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
                                bfa_fcport_get_ratelim_speed(port->fcs->bfa);
                }
 
-               if      ((rport_speed  == BFA_PORT_SPEED_8GBPS) ||
-                       (rport_speed > port_speed)) {
-                       max_speed = rport_speed;
-                       break;
-               } else if (rport_speed > max_speed) {
+               if (rport_speed > max_speed)
                        max_speed = rport_speed;
-               }
 
                qe = bfa_q_next(qe);
        }
 
+       if (max_speed > port_speed)
+               max_speed = port_speed;
+
        bfa_trc(fcs, max_speed);
        return max_speed;
 }
@@ -4918,6 +5118,7 @@ enum bfa_fcs_vport_event {
        BFA_FCS_VPORT_SM_DELCOMP = 11,  /*  lport delete completion */
        BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,      /*  Dup wwn error */
        BFA_FCS_VPORT_SM_RSP_FAILED = 13,       /*  non-retryable failure */
+       BFA_FCS_VPORT_SM_STOPCOMP = 14, /* lport stop completion */
 };
 
 static void     bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
@@ -4930,6 +5131,8 @@ static void     bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
                                       enum bfa_fcs_vport_event event);
 static void     bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
                                             enum bfa_fcs_vport_event event);
+static void    bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
+                                       enum bfa_fcs_vport_event event);
 static void     bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
                                        enum bfa_fcs_vport_event event);
 static void     bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
@@ -4940,6 +5143,10 @@ static void     bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
                                      enum bfa_fcs_vport_event event);
 static void     bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
                                      enum bfa_fcs_vport_event event);
+static void    bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+                                       enum bfa_fcs_vport_event event);
+static void    bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+                                       enum bfa_fcs_vport_event event);
 
 static struct bfa_sm_table_s  vport_sm_table[] = {
        {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
@@ -4947,6 +5154,7 @@ static struct bfa_sm_table_s  vport_sm_table[] = {
        {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
        {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
        {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
+       {BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT},
        {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
        {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
        {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
@@ -5042,6 +5250,11 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
                bfa_fcs_vport_do_fdisc(vport);
                break;
 
+       case BFA_FCS_VPORT_SM_STOP:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+               bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+               break;
+
        case BFA_FCS_VPORT_SM_OFFLINE:
                /*
                 * This can happen if the vport couldn't be initialized
@@ -5070,9 +5283,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
 
        switch (event) {
        case BFA_FCS_VPORT_SM_DELETE:
-               bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
-               bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
-               bfa_fcs_lport_delete(&vport->lport);
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait);
                break;
 
        case BFA_FCS_VPORT_SM_OFFLINE:
@@ -5139,6 +5350,41 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
        }
 }
 
+/*
+ * FDISC is in progress and a vport delete request has arrived. This is a
+ * wait state for the FDISC response; based on the response status we then
+ * transition to the appropriate state.
+ */
+static void
+bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
+                               enum bfa_fcs_vport_event event)
+{
+       bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+       bfa_trc(__vport_fcs(vport), event);
+
+       switch (event) {
+       case BFA_FCS_VPORT_SM_RSP_OK:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
+               bfa_fcs_lport_delete(&vport->lport);
+               break;
+
+       case BFA_FCS_VPORT_SM_DELETE:
+               break;
+
+       case BFA_FCS_VPORT_SM_OFFLINE:
+       case BFA_FCS_VPORT_SM_RSP_ERROR:
+       case BFA_FCS_VPORT_SM_RSP_FAILED:
+       case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+               bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+               bfa_fcs_lport_delete(&vport->lport);
+               break;
+
+       default:
+               bfa_sm_fault(__vport_fcs(vport), event);
+       }
+}
+
 /*
  * Vport is online (FDISC is complete).
  */
@@ -5155,6 +5401,11 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
                bfa_fcs_lport_delete(&vport->lport);
                break;
 
+       case BFA_FCS_VPORT_SM_STOP:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping);
+               bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+               break;
+
        case BFA_FCS_VPORT_SM_OFFLINE:
                bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
                bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
@@ -5166,6 +5417,32 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
        }
 }
 
+/*
+ * Vport is being stopped - awaiting lport stop completion to send
+ * LOGO to fabric.
+ */
+static void
+bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+                         enum bfa_fcs_vport_event event)
+{
+       bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+       bfa_trc(__vport_fcs(vport), event);
+
+       switch (event) {
+       case BFA_FCS_VPORT_SM_STOPCOMP:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop);
+               bfa_fcs_vport_do_logo(vport);
+               break;
+
+       case BFA_FCS_VPORT_SM_OFFLINE:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+               break;
+
+       default:
+               bfa_sm_fault(__vport_fcs(vport), event);
+       }
+}
+
 /*
  * Vport is being deleted - awaiting lport delete completion to send
  * LOGO to fabric.
@@ -5236,6 +5513,10 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
                bfa_fcs_vport_free(vport);
                break;
 
+       case BFA_FCS_VPORT_SM_STOPCOMP:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+               break;
+
        case BFA_FCS_VPORT_SM_DELETE:
                break;
 
@@ -5244,6 +5525,34 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
        }
 }
 
+/*
+ * LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup
+ * is done.
+ */
+static void
+bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+                              enum bfa_fcs_vport_event event)
+{
+       bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+       bfa_trc(__vport_fcs(vport), event);
+
+       switch (event) {
+       case BFA_FCS_VPORT_SM_OFFLINE:
+               bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+               /*
+                * !!! fall through !!!
+                */
+
+       case BFA_FCS_VPORT_SM_RSP_OK:
+       case BFA_FCS_VPORT_SM_RSP_ERROR:
+               bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+               break;
+
+       default:
+               bfa_sm_fault(__vport_fcs(vport), event);
+       }
+}
+
 /*
  * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
  * is done.
@@ -5281,6 +5590,31 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
 /*
  *  fcs_vport_private FCS virtual port private functions
  */
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
+                      enum bfa_lport_aen_event event)
+{
+       struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+       struct bfa_aen_entry_s  *aen_entry;
+
+       bfad_get_aen_entry(bfad, aen_entry);
+       if (!aen_entry)
+               return;
+
+       aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+       aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+       aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+                                       bfa_fcs_get_base_port(port->fcs));
+       aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+       /* Send the AEN notification */
+       bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+                                 BFA_AEN_CAT_LPORT, event);
+}
+
 /*
  * This routine will be called to send a FDISC command.
  */
@@ -5308,8 +5642,11 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
        case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
                if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
                        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
-               else
+               else {
+                       bfa_fcs_vport_aen_post(&vport->lport,
+                                       BFA_LPORT_AEN_NPIV_DUP_WWN);
                        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
+               }
                break;
 
        case FC_LS_RJT_EXP_INSUFF_RES:
@@ -5319,11 +5656,17 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
                 */
                if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
                        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
-               else
+               else {
+                       bfa_fcs_vport_aen_post(&vport->lport,
+                                       BFA_LPORT_AEN_NPIV_FABRIC_MAX);
                        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+               }
                break;
 
        default:
+               if (vport->fdisc_retries == 0)
+                       bfa_fcs_vport_aen_post(&vport->lport,
+                                       BFA_LPORT_AEN_NPIV_UNKNOWN);
                bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
        }
 }
@@ -5391,7 +5734,10 @@ void
 bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
 {
        vport->vport_stats.fab_online++;
-       bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+       if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport)))
+               bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+       else
+               vport->vport_stats.fab_no_npiv++;
 }
 
 /*
@@ -5421,6 +5767,15 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
 }
 
+/*
+ * Stop completion callback from associated lport
+ */
+void
+bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport)
+{
+       bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP);
+}
+
 /*
  * Delete completion callback from associated lport
  */
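
For illustration, the new FDMI speed handling above splits into two steps: the supported-speeds bitmap is chosen from the port's speed_supported capability, and the current speed is translated through bfa_fcs_fdmi_convert_speed() before the big-endian conversion. Below is a minimal sketch of the second step, built only from identifiers in the hunk above; the wrapper function itself is hypothetical.

    /* Hypothetical helper: translate and store the current link speed. */
    static void
    fdmi_fill_curr_speed(struct bfa_fcs_fdmi_port_attr_s *port_attr,
                         bfa_port_speed_t speed)
    {
            port_attr->curr_speed =
                    cpu_to_be32(bfa_fcs_fdmi_convert_speed(speed));
    }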
index caaee6f06937f132f8c7b4937f791d1e114e4719..52628d5d3c9b0663486833db47eda3d449a60aa9 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 
@@ -262,6 +263,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PLOGI_RCVD:
+       case RPSM_EVENT_PLOGI_COMP:
        case RPSM_EVENT_SCN:
                /*
                 * Ignore, SCN is possibly online notification.
@@ -470,6 +472,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PRLO_RCVD:
+       case RPSM_EVENT_PLOGI_COMP:
                break;
 
        case RPSM_EVENT_LOGO_RCVD:
@@ -484,9 +487,9 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PLOGI_RCVD:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+               rport->plogi_pending = BFA_TRUE;
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
                bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
-               bfa_fcs_rport_send_plogiacc(rport, NULL);
                break;
 
        case RPSM_EVENT_DELETE:
@@ -891,6 +894,18 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
 
        switch (event) {
        case RPSM_EVENT_HCB_OFFLINE:
+               if (bfa_fcs_lport_is_online(rport->port) &&
+                   (rport->plogi_pending)) {
+                       rport->plogi_pending = BFA_FALSE;
+                       bfa_sm_set_state(rport,
+                               bfa_fcs_rport_sm_plogiacc_sending);
+                       bfa_fcs_rport_send_plogiacc(rport, NULL);
+                       break;
+               }
+               /*
+                * !! fall through !!
+                */
+
        case RPSM_EVENT_ADDRESS_CHANGE:
                if (bfa_fcs_lport_is_online(rport->port)) {
                        if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
@@ -921,6 +936,8 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
        case RPSM_EVENT_SCN:
        case RPSM_EVENT_LOGO_RCVD:
        case RPSM_EVENT_PRLO_RCVD:
+       case RPSM_EVENT_PLOGI_RCVD:
+       case RPSM_EVENT_LOGO_IMP:
                /*
                 * Ignore, already offline.
                 */
@@ -957,10 +974,18 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
                 */
                if (bfa_fcs_lport_is_online(rport->port) &&
                        (!BFA_FCS_PID_IS_WKA(rport->pid))) {
-                       bfa_sm_set_state(rport,
-                               bfa_fcs_rport_sm_nsdisc_sending);
-                       rport->ns_retries = 0;
-                       bfa_fcs_rport_send_nsdisc(rport, NULL);
+                       if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+                               bfa_sm_set_state(rport,
+                                       bfa_fcs_rport_sm_nsdisc_sending);
+                               rport->ns_retries = 0;
+                               bfa_fcs_rport_send_nsdisc(rport, NULL);
+                       } else {
+                               /* For N2N Direct Attach, try to re-login */
+                               bfa_sm_set_state(rport,
+                                       bfa_fcs_rport_sm_plogi_sending);
+                               rport->plogi_retries = 0;
+                               bfa_fcs_rport_send_plogi(rport, NULL);
+                       }
                } else {
                        /*
                         * if it is not a well known address, reset the
@@ -1356,7 +1381,8 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
                                bfa_fcs_lport_get_fcid(port), 0,
                                port->port_cfg.pwwn, port->port_cfg.nwwn,
-                               bfa_fcport_get_maxfrsize(port->fcs->bfa));
+                               bfa_fcport_get_maxfrsize(port->fcs->bfa),
+                               bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                        FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
@@ -1476,7 +1502,8 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
                                 rport->pid, bfa_fcs_lport_get_fcid(port),
                                 rport->reply_oxid, port->port_cfg.pwwn,
                                 port->port_cfg.nwwn,
-                                bfa_fcport_get_maxfrsize(port->fcs->bfa));
+                                bfa_fcport_get_maxfrsize(port->fcs->bfa),
+                                bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                        FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
@@ -2014,6 +2041,35 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
        kfree(rport->rp_drv);
 }
 
+static void
+bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
+                       enum bfa_rport_aen_event event,
+                       struct bfa_rport_aen_data_s *data)
+{
+       struct bfa_fcs_lport_s *port = rport->port;
+       struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+       struct bfa_aen_entry_s  *aen_entry;
+
+       bfad_get_aen_entry(bfad, aen_entry);
+       if (!aen_entry)
+               return;
+
+       if (event == BFA_RPORT_AEN_QOS_PRIO)
+               aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+       else if (event == BFA_RPORT_AEN_QOS_FLOWID)
+               aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+
+       aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
+       aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
+                                       bfa_fcs_get_base_port(rport->fcs));
+       aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+       aen_entry->aen_data.rport.rpwwn = rport->pwwn;
+
+       /* Send the AEN notification */
+       bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+                                 BFA_AEN_CAT_RPORT, event);
+}
+
 static void
 bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
 {
@@ -2024,6 +2080,11 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
 
        rport->stats.onlines++;
 
+       if ((!rport->pid) || (!rport->pwwn)) {
+               bfa_trc(rport->fcs, rport->pid);
+               bfa_sm_fault(rport->fcs, rport->pid);
+       }
+
        if (bfa_fcs_lport_is_initiator(port)) {
                bfa_fcs_itnim_rport_online(rport->itnim);
                if (!BFA_FCS_PID_IS_WKA(rport->pid))
@@ -2032,10 +2093,12 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
 
        wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
        wwn2str(rpwwn_buf, rport->pwwn);
-       if (!BFA_FCS_PID_IS_WKA(rport->pid))
+       if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "Remote port (WWN = %s) online for logical port (WWN = %s)\n",
                rpwwn_buf, lpwwn_buf);
+               bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
+       }
 }
 
 static void
@@ -2047,20 +2110,26 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
        char    rpwwn_buf[BFA_STRING_32];
 
        rport->stats.offlines++;
+       rport->plogi_pending = BFA_FALSE;
 
        wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
        wwn2str(rpwwn_buf, rport->pwwn);
        if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
-               if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE)
+               if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Remote port (WWN = %s) connectivity lost for "
                                "logical port (WWN = %s)\n",
                                rpwwn_buf, lpwwn_buf);
-               else
+                       bfa_fcs_rport_aen_post(rport,
+                               BFA_RPORT_AEN_DISCONNECT, NULL);
+               } else {
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Remote port (WWN = %s) offlined by "
                                "logical port (WWN = %s)\n",
                                rpwwn_buf, lpwwn_buf);
+                       bfa_fcs_rport_aen_post(rport,
+                               BFA_RPORT_AEN_OFFLINE, NULL);
+               }
        }
 
        if (bfa_fcs_lport_is_initiator(port)) {
@@ -2120,7 +2189,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
 
                port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
                bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
-                                         port->fabric->bb_credit);
+                                         port->fabric->bb_credit, 0);
        }
 
 }
@@ -2233,22 +2302,6 @@ bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
        bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
 }
 
-static int
-wwn_compare(wwn_t wwn1, wwn_t wwn2)
-{
-       u8              *b1 = (u8 *) &wwn1;
-       u8              *b2 = (u8 *) &wwn2;
-       int             i;
-
-       for (i = 0; i < sizeof(wwn_t); i++) {
-               if (b1[i] < b2[i])
-                       return -1;
-               if (b1[i] > b2[i])
-                       return 1;
-       }
-       return 0;
-}
-
 /*
  *     Called by bport/vport to handle PLOGI received from an existing
  *      remote port.
@@ -2266,19 +2319,8 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
        rport->reply_oxid = rx_fchs->ox_id;
        bfa_trc(rport->fcs, rport->reply_oxid);
 
-       /*
-        * In Switched fabric topology,
-        * PLOGI to each other. If our pwwn is smaller, ignore it,
-        * if it is not a well known address.
-        * If the link topology is N2N,
-        * this Plogi should be accepted.
-        */
-       if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) &&
-               (bfa_fcs_fabric_is_switched(rport->port->fabric)) &&
-               (!BFA_FCS_PID_IS_WKA(rport->pid))) {
-               bfa_trc(rport->fcs, rport->pid);
-               return;
-       }
+       rport->pid = rx_fchs->s_id;
+       bfa_trc(rport->fcs, rport->pid);
 
        rport->stats.plogi_rcvd++;
        bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
@@ -2361,8 +2403,11 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
                struct bfa_rport_qos_attr_s new_qos_attr)
 {
        struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+       struct bfa_rport_aen_data_s aen_data;
 
        bfa_trc(rport->fcs, rport->pwwn);
+       aen_data.priv.qos = new_qos_attr;
+       bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
 }
 
 /*
@@ -2385,8 +2430,11 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
                struct bfa_rport_qos_attr_s new_qos_attr)
 {
        struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+       struct bfa_rport_aen_data_s aen_data;
 
        bfa_trc(rport->fcs, rport->pwwn);
+       aen_data.priv.qos = new_qos_attr;
+       bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
 }
 
 /*
@@ -2531,7 +2579,45 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
        bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
 }
 
-
+void
+bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+               struct bfa_rport_attr_s *rport_attr)
+{
+       struct bfa_rport_qos_attr_s qos_attr;
+       struct bfa_fcs_lport_s *port = rport->port;
+       bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
+
+       memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
+       memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));
+
+       rport_attr->pid = rport->pid;
+       rport_attr->pwwn = rport->pwwn;
+       rport_attr->nwwn = rport->nwwn;
+       rport_attr->cos_supported = rport->fc_cos;
+       rport_attr->df_sz = rport->maxfrsize;
+       rport_attr->state = bfa_fcs_rport_get_state(rport);
+       rport_attr->fc_cos = rport->fc_cos;
+       rport_attr->cisc = rport->cisc;
+       rport_attr->scsi_function = rport->scsi_function;
+       rport_attr->curr_speed  = rport->rpf.rpsc_speed;
+       rport_attr->assigned_speed  = rport->rpf.assigned_speed;
+
+       qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
+       qos_attr.qos_flow_id =
+               cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+       rport_attr->qos_attr = qos_attr;
+
+       rport_attr->trl_enforced = BFA_FALSE;
+       if (bfa_fcport_is_ratelim(port->fcs->bfa) &&
+           (rport->scsi_function == BFA_RPORT_TARGET)) {
+               if (rport_speed == BFA_PORT_SPEED_UNKNOWN)
+                       rport_speed =
+                               bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
+
+               if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
+                       rport_attr->trl_enforced = BFA_TRUE;
+       }
+}
 
 /*
  * Remote port implementation.
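
bfa_fcs_rport_get_attr() above is a new snapshot-style getter. A hypothetical caller, for instance management code that wants to know whether a target is being rate limited, could use it as sketched below; the wrapper is illustrative only and not part of this patch.

    /* Hypothetical caller: check whether rate limiting applies to this rport. */
    static bfa_boolean_t
    rport_trl_enforced(struct bfa_fcs_rport_s *rport)
    {
            struct bfa_rport_attr_s attr;

            bfa_fcs_rport_get_attr(rport, &attr);
            return attr.trl_enforced;
    }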
index 977e681ec803090d2ec7acc7b0a2ed49147ea985..ea24d4c6e67afc025ce68e5363f8eda1771f335e 100644 (file)
 
 #include "bfad_drv.h"
 #include "bfa_modules.h"
-#include "bfi_cbreg.h"
+#include "bfi_reg.h"
 
 void
 bfa_hwcb_reginit(struct bfa_s *bfa)
 {
        struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
        void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
-       int                     i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+       int     fn = bfa_ioc_pcifn(&bfa->ioc);
 
        if (fn == 0) {
                bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -33,29 +33,6 @@ bfa_hwcb_reginit(struct bfa_s *bfa)
                bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
                bfa_regs->intr_mask   = (kva + HOSTFN1_INT_MSK);
        }
-
-       for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
-               /*
-                * CPE registers
-                */
-               q = CPE_Q_NUM(fn, i);
-               bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
-               bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
-               bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));
-
-               /*
-                * RME registers
-                */
-               q = CPE_Q_NUM(fn, i);
-               bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
-               bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
-               bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
-       }
-}
-
-void
-bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
-{
 }
 
 static void
@@ -65,16 +42,36 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
                        bfa->iocfc.bfa_regs.intr_status);
 }
 
-void
-bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
+/*
+ * Actions to respond to an RME interrupt on the Crossbow ASIC:
+ * - Write 1 to the Interrupt Status register
+ *              INTx - done in bfa_intx()
+ *              MSIX - done in bfa_hwcb_rspq_ack_msix()
+ * - Update CI (only if CI has changed)
+ */
+static void
+bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
 {
+       writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
+               bfa->iocfc.bfa_regs.intr_status);
+
+       if (bfa_rspq_ci(bfa, rspq) == ci)
+               return;
+
+       bfa_rspq_ci(bfa, rspq) = ci;
+       writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+       mmiowb();
 }
 
-static void
-bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
+void
+bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
 {
-       writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
-                       bfa->iocfc.bfa_regs.intr_status);
+       if (bfa_rspq_ci(bfa, rspq) == ci)
+               return;
+
+       bfa_rspq_ci(bfa, rspq) = ci;
+       writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+       mmiowb();
 }
 
 void
@@ -103,44 +100,72 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
        *num_vecs = __HFN_NUMINTS;
 }
 
+/*
+ * Dummy interrupt handler for handling spurious interrupts.
+ */
+static void
+bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
+{
+}
+
 /*
  * No special setup required for crossbow -- vector assignments are implicit.
  */
 void
 bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
 {
-       int i;
-
        WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
 
        bfa->msix.nvecs = nvecs;
-       if (nvecs == 1) {
-               for (i = 0; i < BFA_MSIX_CB_MAX; i++)
+       bfa_hwcb_msix_uninstall(bfa);
+}
+
+void
+bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
+{
+       int i;
+
+       if (bfa->msix.nvecs == 0)
+               return;
+
+       if (bfa->msix.nvecs == 1) {
+               for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++)
                        bfa->msix.handler[i] = bfa_msix_all;
                return;
        }
 
-       for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++)
-               bfa->msix.handler[i] = bfa_msix_reqq;
-
-       for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
-               bfa->msix.handler[i] = bfa_msix_rspq;
-
-       for (; i < BFA_MSIX_CB_MAX; i++)
+       for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
                bfa->msix.handler[i] = bfa_msix_lpu_err;
 }
 
-/*
- * Crossbow -- dummy, interrupts are masked
- */
 void
-bfa_hwcb_msix_install(struct bfa_s *bfa)
+bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
 {
+       int i;
+
+       if (bfa->msix.nvecs == 0)
+               return;
+
+       if (bfa->msix.nvecs == 1) {
+               for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+                       bfa->msix.handler[i] = bfa_msix_all;
+               return;
+       }
+
+       for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
+               bfa->msix.handler[i] = bfa_msix_reqq;
+
+       for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+               bfa->msix.handler[i] = bfa_msix_rspq;
 }
 
 void
 bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
 {
+       int i;
+
+       for (i = 0; i < BFI_MSIX_CB_MAX; i++)
+               bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
 }
 
 /*
@@ -149,13 +174,18 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
 void
 bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
 {
-       bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
-       bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+       if (msix) {
+               bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
+               bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+       } else {
+               bfa->iocfc.hwif.hw_reqq_ack = NULL;
+               bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+       }
 }
 
 void
 bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
 {
-       *start = BFA_MSIX_RME_Q0;
-       *end = BFA_MSIX_RME_Q7;
+       *start = BFI_MSIX_RME_QMIN_CB;
+       *end = BFI_MSIX_RME_QMAX_CB;
 }
index 21018d98a07bb002bffe063e4c8650b8f3cc6263..637527f48b400c741805dbdf9a2e5758cad64a7e 100644 (file)
 
 #include "bfad_drv.h"
 #include "bfa_modules.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
 
 BFA_TRC_FILE(HAL, IOCFC_CT);
 
-static u32 __ct_msix_err_vec_reg[] = {
-       HOST_MSIX_ERR_INDEX_FN0,
-       HOST_MSIX_ERR_INDEX_FN1,
-       HOST_MSIX_ERR_INDEX_FN2,
-       HOST_MSIX_ERR_INDEX_FN3,
-};
-
-static void
-bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
-{
-       int fn = bfa_ioc_pcifn(&bfa->ioc);
-       void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
-
-       if (msix)
-               writel(vec, kva + __ct_msix_err_vec_reg[fn]);
-       else
-               writel(0, kva + __ct_msix_err_vec_reg[fn]);
-}
-
 /*
  * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
  */
@@ -53,7 +34,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
 {
        struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
        void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
-       int                     i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+       int     fn = bfa_ioc_pcifn(&bfa->ioc);
 
        if (fn == 0) {
                bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -62,26 +43,16 @@ bfa_hwct_reginit(struct bfa_s *bfa)
                bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
                bfa_regs->intr_mask   = (kva + HOSTFN1_INT_MSK);
        }
+}
 
-       for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
-               /*
-                * CPE registers
-                */
-               q = CPE_Q_NUM(fn, i);
-               bfa_regs->cpe_q_pi[i] = (kva + CPE_PI_PTR_Q(q << 5));
-               bfa_regs->cpe_q_ci[i] = (kva + CPE_CI_PTR_Q(q << 5));
-               bfa_regs->cpe_q_depth[i] = (kva + CPE_DEPTH_Q(q << 5));
-               bfa_regs->cpe_q_ctrl[i] = (kva + CPE_QCTRL_Q(q << 5));
-
-               /*
-                * RME registers
-                */
-               q = CPE_Q_NUM(fn, i);
-               bfa_regs->rme_q_pi[i] = (kva + RME_PI_PTR_Q(q << 5));
-               bfa_regs->rme_q_ci[i] = (kva + RME_CI_PTR_Q(q << 5));
-               bfa_regs->rme_q_depth[i] = (kva + RME_DEPTH_Q(q << 5));
-               bfa_regs->rme_q_ctrl[i] = (kva + RME_QCTRL_Q(q << 5));
-       }
+void
+bfa_hwct2_reginit(struct bfa_s *bfa)
+{
+       struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
+       void __iomem    *kva = bfa_ioc_bar0(&bfa->ioc);
+
+       bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS);
+       bfa_regs->intr_mask   = (kva + CT2_HOSTFN_INTR_MASK);
 }
 
 void
@@ -93,22 +64,45 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
        writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
 }
 
+/*
+ * Actions to respond to an RME interrupt on the Catapult ASIC:
+ * - Write 1 to the Interrupt Status register (INTx only - done in bfa_intx())
+ * - Acknowledge by writing to the RME Queue Control register
+ * - Update CI
+ */
 void
-bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
+bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
 {
        u32     r32;
 
        r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
        writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+
+       bfa_rspq_ci(bfa, rspq) = ci;
+       writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+       mmiowb();
+}
+
+/*
+ * Actions to respond to an RME interrupt on the Catapult2 ASIC:
+ * - Write 1 to the Interrupt Status register (INTx only - done in bfa_intx())
+ * - Update CI
+ */
+void
+bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+       bfa_rspq_ci(bfa, rspq) = ci;
+       writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+       mmiowb();
 }
 
 void
 bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
                 u32 *num_vecs, u32 *max_vec_bit)
 {
-       *msix_vecs_bmap = (1 << BFA_MSIX_CT_MAX) - 1;
-       *max_vec_bit = (1 << (BFA_MSIX_CT_MAX - 1));
-       *num_vecs = BFA_MSIX_CT_MAX;
+       *msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1;
+       *max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1));
+       *num_vecs = BFI_MSIX_CT_MAX;
 }
 
 /*
@@ -117,7 +111,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
 void
 bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
 {
-       WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX));
+       WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX));
        bfa_trc(bfa, nvecs);
 
        bfa->msix.nvecs = nvecs;
@@ -125,7 +119,19 @@ bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
 }
 
 void
-bfa_hwct_msix_install(struct bfa_s *bfa)
+bfa_hwct_msix_ctrl_install(struct bfa_s *bfa)
+{
+       if (bfa->msix.nvecs == 0)
+               return;
+
+       if (bfa->msix.nvecs == 1)
+               bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;
+       else
+               bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
+}
+
+void
+bfa_hwct_msix_queue_install(struct bfa_s *bfa)
 {
        int i;
 
@@ -133,19 +139,16 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
                return;
 
        if (bfa->msix.nvecs == 1) {
-               for (i = 0; i < BFA_MSIX_CT_MAX; i++)
+               for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++)
                        bfa->msix.handler[i] = bfa_msix_all;
                return;
        }
 
-       for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q3; i++)
+       for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++)
                bfa->msix.handler[i] = bfa_msix_reqq;
 
-       for (; i <= BFA_MSIX_RME_Q3; i++)
+       for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++)
                bfa->msix.handler[i] = bfa_msix_rspq;
-
-       WARN_ON(i != BFA_MSIX_LPU_ERR);
-       bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
 }
 
 void
@@ -153,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
 {
        int i;
 
-       for (i = 0; i < BFA_MSIX_CT_MAX; i++)
+       for (i = 0; i < BFI_MSIX_CT_MAX; i++)
                bfa->msix.handler[i] = bfa_hwct_msix_dummy;
 }
 
@@ -164,13 +167,12 @@ void
 bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
 {
        bfa_trc(bfa, 0);
-       bfa_hwct_msix_lpu_err_set(bfa, msix, BFA_MSIX_LPU_ERR);
        bfa_ioc_isr_mode_set(&bfa->ioc, msix);
 }
 
 void
 bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
 {
-       *start = BFA_MSIX_RME_Q0;
-       *end = BFA_MSIX_RME_Q3;
+       *start = BFI_MSIX_RME_QMIN_CT;
+       *end = BFI_MSIX_RME_QMAX_CT;
 }
index 6c7e0339dda44d0d7029cd83bd2700950e1c49d2..1ac5aecf25a68a14c2c98011cf1af357b60ead26 100644 (file)
@@ -16,8 +16,9 @@
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_ioc.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
 #include "bfa_defs.h"
 #include "bfa_defs_svc.h"
 
@@ -29,8 +30,8 @@ BFA_TRC_FILE(CNA, IOC);
 #define BFA_IOC_TOV            3000    /* msecs */
 #define BFA_IOC_HWSEM_TOV      500     /* msecs */
 #define BFA_IOC_HB_TOV         500     /* msecs */
-#define BFA_IOC_HWINIT_MAX     5
 #define BFA_IOC_TOV_RECOVER     BFA_IOC_HB_TOV
+#define BFA_IOC_POLL_TOV       BFA_TIMER_FREQ
 
 #define bfa_ioc_timer_start(__ioc)                                     \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
@@ -79,14 +80,17 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE;
 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
 static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
-static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
 static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
+static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
+                               enum bfa_ioc_event_e event);
 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
 static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
@@ -105,11 +109,12 @@ enum ioc_event {
        IOC_E_ENABLED           = 5,    /*  f/w enabled         */
        IOC_E_FWRSP_GETATTR     = 6,    /*  IOC get attribute response  */
        IOC_E_DISABLED          = 7,    /*  f/w disabled                */
-       IOC_E_INITFAILED        = 8,    /*  failure notice by iocpf sm  */
-       IOC_E_PFFAILED          = 9,    /*  failure notice by iocpf sm  */
-       IOC_E_HBFAIL            = 10,   /*  heartbeat failure           */
-       IOC_E_HWERROR           = 11,   /*  hardware error interrupt    */
-       IOC_E_TIMEOUT           = 12,   /*  timeout                     */
+       IOC_E_PFFAILED          = 8,    /*  failure notice by iocpf sm  */
+       IOC_E_HBFAIL            = 9,    /*  heartbeat failure           */
+       IOC_E_HWERROR           = 10,   /*  hardware error interrupt    */
+       IOC_E_TIMEOUT           = 11,   /*  timeout                     */
+       IOC_E_HWFAILED          = 12,   /*  PCI mapping failure notice  */
+       IOC_E_FWRSP_ACQ_ADDR    = 13,   /*  Acquiring address           */
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -121,6 +126,8 @@ bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
 
 static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -132,6 +139,8 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+       {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
+       {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
 };
 
 /*
@@ -143,9 +152,9 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
                        bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
 #define bfa_iocpf_timer_stop(__ioc)    bfa_timer_stop(&(__ioc)->ioc_timer)
 
-#define bfa_iocpf_recovery_timer_start(__ioc)                          \
+#define bfa_iocpf_poll_timer_start(__ioc)                              \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
-                       bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
+                       bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
 
 #define bfa_sem_timer_start(__ioc)                                     \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,        \
@@ -157,6 +166,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
  */
 static void bfa_iocpf_timeout(void *ioc_arg);
 static void bfa_iocpf_sem_timeout(void *ioc_arg);
+static void bfa_iocpf_poll_timeout(void *ioc_arg);
 
 /*
  * IOCPF state machine events
@@ -173,6 +183,7 @@ enum iocpf_event {
        IOCPF_E_GETATTRFAIL     = 9,    /*  init fail notice by ioc sm  */
        IOCPF_E_SEMLOCKED       = 10,   /*  h/w semaphore is locked     */
        IOCPF_E_TIMEOUT         = 11,   /*  f/w response timeout        */
+       IOCPF_E_SEM_ERROR       = 12,   /*  h/w sem mapping error       */
 };
 
 /*
@@ -314,11 +325,16 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;
 
+       case IOC_E_HWFAILED:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+               break;
+
        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;
@@ -356,17 +372,23 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
        case IOC_E_FWRSP_GETATTR:
                bfa_ioc_timer_stop(ioc);
                bfa_ioc_check_attr_wwns(ioc);
+               bfa_ioc_hb_monitor(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;
 
+       case IOC_E_FWRSP_ACQ_ADDR:
+               bfa_ioc_timer_stop(ioc);
+               bfa_ioc_hb_monitor(ioc);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
                break;
+
        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
                break;
@@ -384,6 +406,50 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
        }
 }
 
+/*
+ * Acquiring address from fabric (entry function)
+ */
+static void
+bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
+{
+}
+
+/*
+ *     Acquiring address from the fabric
+ */
+static void
+bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+       bfa_trc(ioc, event);
+
+       switch (event) {
+       case IOC_E_FWRSP_GETATTR:
+               bfa_ioc_check_attr_wwns(ioc);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+               break;
+
+       case IOC_E_PFFAILED:
+       case IOC_E_HWERROR:
+               bfa_hb_timer_stop(ioc);
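+               /* !!! fall through !!! */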
+       case IOC_E_HBFAIL:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+               if (event != IOC_E_PFFAILED)
+                       bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
+               break;
+
+       case IOC_E_DISABLE:
+               bfa_hb_timer_stop(ioc);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+               break;
+
+       case IOC_E_ENABLE:
+               break;
+
+       default:
+               bfa_sm_fault(ioc, event);
+       }
+}
 
 static void
 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
@@ -391,8 +457,9 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
-       bfa_ioc_hb_monitor(ioc);
+       bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
+       bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
 }
 
 static void
@@ -414,13 +481,13 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
                bfa_hb_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
-               bfa_ioc_fail_notify(ioc);
-
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 
+               bfa_ioc_fail_notify(ioc);
+
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;
@@ -437,6 +504,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
+       bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
 }
 
 /*
@@ -461,6 +529,11 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;
 
+       case IOC_E_HWFAILED:
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+               bfa_ioc_disable_comp(ioc);
+               break;
+
        default:
                bfa_sm_fault(ioc, event);
        }
@@ -525,12 +598,14 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;
 
-       case IOC_E_INITFAILED:
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+       case IOC_E_HWFAILED:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;
 
        case IOC_E_ENABLE:
@@ -590,6 +665,35 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
        }
 }
 
+static void
+bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
+{
+       bfa_trc(ioc, 0);
+}
+
+static void
+bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+       bfa_trc(ioc, event);
+
+       switch (event) {
+       case IOC_E_ENABLE:
+               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+               break;
+
+       case IOC_E_DISABLE:
+               ioc->cbfn->disable_cbfn(ioc->bfa);
+               break;
+
+       case IOC_E_DETACH:
+               bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+               break;
+
+       default:
+               bfa_sm_fault(ioc, event);
+       }
+}
+
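
Note: the new hwfail and acq_addr states are wired in with the driver's usual FSM recipe: declare the state with bfa_fsm_state_decl(), provide the *_entry and event-handler functions, and add a {BFA_SM(...), <state code>} pair to ioc_sm_table[]. A hedged outline of that recipe for a hypothetical state:

/* Sketch only -- "mystate" and its state code are hypothetical;
 * the macros are the driver's own FSM helpers. */
bfa_fsm_state_decl(bfa_ioc, mystate, struct bfa_ioc_s, enum ioc_event);

static void
bfa_ioc_sm_mystate_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);                        /* entry action, if any */
}

static void
bfa_ioc_sm_mystate(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;
        default:
                bfa_sm_fault(ioc, event);
        }
}
/* ...plus an ioc_sm_table[] entry such as
 *      {BFA_SM(bfa_ioc_sm_mystate), <bfa_ioc_state code>},
 * so bfa_sm_to_state() can report it. */
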
 /*
  * IOCPF State Machine
  */
@@ -600,7 +704,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 static void
 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
 {
-       iocpf->retry_count = 0;
+       iocpf->fw_mismatch_notified = BFA_FALSE;
        iocpf->auto_recover = bfa_auto_recover;
 }
 
@@ -633,6 +737,28 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 {
+       struct bfi_ioc_image_hdr_s      fwhdr;
+       u32     fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+
+       /* h/w sem init */
+       if (fwstate == BFI_IOC_UNINIT)
+               goto sem_get;
+
+       bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
+
+       if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+               goto sem_get;
+
+       bfa_trc(iocpf->ioc, fwstate);
+       bfa_trc(iocpf->ioc, fwhdr.exec);
+       writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
+
+       /*
+        * Try to lock and then unlock the semaphore.
+        */
+       readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
+       writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
+sem_get:
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
@@ -650,7 +776,6 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
-                               iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
@@ -664,6 +789,11 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                }
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+               break;
+
        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
@@ -689,10 +819,10 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
        /*
         * Call only the first time sm enters fwmismatch state.
         */
-       if (iocpf->retry_count == 0)
+       if (iocpf->fw_mismatch_notified == BFA_FALSE)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);
 
-       iocpf->retry_count++;
+       iocpf->fw_mismatch_notified = BFA_TRUE;
        bfa_iocpf_timer_start(iocpf->ioc);
 }
 
@@ -757,6 +887,11 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                }
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+               break;
+
        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -770,7 +905,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
 {
-       bfa_iocpf_timer_start(iocpf->ioc);
+       iocpf->poll_time = 0;
        bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
 }
 
@@ -787,20 +922,12 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
        switch (event) {
        case IOCPF_E_FWREADY:
-               bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;
 
-       case IOCPF_E_INITFAIL:
-               bfa_iocpf_timer_stop(ioc);
-               /*
-                * !!! fall through !!!
-                */
-
        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
-               if (event == IOCPF_E_TIMEOUT)
-                       bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+               bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;
 
@@ -820,6 +947,10 @@ static void
 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
 {
        bfa_iocpf_timer_start(iocpf->ioc);
+       /*
+        * Enable Interrupts before sending fw IOC ENABLE cmd.
+        */
+       iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
 }
 
@@ -860,10 +991,6 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;
 
-       case IOCPF_E_FWREADY:
-               bfa_ioc_send_enable(ioc);
-               break;
-
        default:
                bfa_sm_fault(ioc, event);
        }
@@ -895,16 +1022,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;
 
-       case IOCPF_E_FWREADY:
-               if (bfa_ioc_is_operational(ioc)) {
-                       bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
-                       bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
-               } else {
-                       bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
-                       bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
-               }
-               break;
-
        default:
                bfa_sm_fault(ioc, event);
        }
@@ -929,7 +1046,6 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
-       case IOCPF_E_FWREADY:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;
@@ -976,6 +1092,11 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+               break;
+
        case IOCPF_E_FAIL:
                break;
 
@@ -990,6 +1111,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
 {
+       bfa_ioc_mbox_flush(iocpf->ioc);
        bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
 }
 
@@ -1002,7 +1124,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
        switch (event) {
        case IOCPF_E_ENABLE:
-               iocpf->retry_count = 0;
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;
 
@@ -1019,6 +1140,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
 {
+       bfa_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
@@ -1035,20 +1157,15 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
-               bfa_ioc_sync_ack(ioc);
-               iocpf->retry_count++;
-               if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
-                       bfa_ioc_sync_leave(ioc);
-                       writel(1, ioc->ioc_regs.ioc_sem_reg);
-                       bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-               } else {
-                       if (bfa_ioc_sync_complete(ioc))
-                               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
-                       else {
-                               writel(1, ioc->ioc_regs.ioc_sem_reg);
-                               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-                       }
-               }
+               bfa_ioc_sync_leave(ioc);
+               writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+               writel(1, ioc->ioc_regs.ioc_sem_reg);
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+               break;
+
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;
 
        case IOCPF_E_DISABLE:
@@ -1073,7 +1190,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
 {
-       bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
+       bfa_trc(iocpf->ioc, 0);
 }
 
 /*
@@ -1112,7 +1229,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
        /*
         * Flush any queued up mailbox requests.
         */
-       bfa_ioc_mbox_hbfail(iocpf->ioc);
+       bfa_ioc_mbox_flush(iocpf->ioc);
 
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
@@ -1126,11 +1243,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
        switch (event) {
        case IOCPF_E_SEMLOCKED:
-               iocpf->retry_count = 0;
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
+                       writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
@@ -1143,6 +1260,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                }
                break;
 
+       case IOCPF_E_SEM_ERROR:
+               bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+               bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+               break;
+
        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -1159,6 +1281,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
 {
+       bfa_trc(iocpf->ioc, 0);
 }
 
 /*
@@ -1185,23 +1308,28 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  *  BFA IOC private functions
  */
 
+/*
+ * Notify common modules registered for notification.
+ */
 static void
-bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
+bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
 {
-       struct list_head                        *qe;
-       struct bfa_ioc_hbfail_notify_s  *notify;
-
-       ioc->cbfn->disable_cbfn(ioc->bfa);
+       struct bfa_ioc_notify_s *notify;
+       struct list_head        *qe;
 
-       /*
-        * Notify common modules registered for notification.
-        */
-       list_for_each(qe, &ioc->hb_notify_q) {
-               notify = (struct bfa_ioc_hbfail_notify_s *) qe;
-               notify->cbfn(notify->cbarg);
+       list_for_each(qe, &ioc->notify_q) {
+               notify = (struct bfa_ioc_notify_s *)qe;
+               notify->cbfn(notify->cbarg, event);
        }
 }
 
+static void
+bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
+{
+       ioc->cbfn->disable_cbfn(ioc->bfa);
+       bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
+}
+
 bfa_boolean_t
 bfa_ioc_sem_get(void __iomem *sem_reg)
 {
@@ -1211,16 +1339,15 @@ bfa_ioc_sem_get(void __iomem *sem_reg)
 
        r32 = readl(sem_reg);
 
-       while (r32 && (cnt < BFA_SEM_SPINCNT)) {
+       while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }
 
-       if (r32 == 0)
+       if (!(r32 & 1))
                return BFA_TRUE;
 
-       WARN_ON(cnt >= BFA_SEM_SPINCNT);
        return BFA_FALSE;
 }
 
@@ -1234,7 +1361,12 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
-       if (r32 == 0) {
+       if (r32 == ~0) {
+               WARN_ON(r32 == ~0);
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
+               return;
+       }
+       if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }
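
Note: the semaphore helpers above now key off bit 0 of the register only (set means another function holds the lock) rather than comparing the whole word against zero, and a readback of all ones is treated as a failed PCI mapping and escalated as IOCPF_E_SEM_ERROR instead of being retried. A condensed sketch of that classification, assuming only standard C types:

#include <stdint.h>

/* Sketch only -- classifying a raw read of the h/w semaphore register.
 * Reading the register attempts the lock: bit 0 clear in the value read
 * back means this reader now owns the semaphore. */
enum sem_result { SEM_ACQUIRED, SEM_BUSY, SEM_DEAD };

static enum sem_result
sem_probe(uint32_t r32)
{
        if (r32 == UINT32_MAX)          /* all ones: PCI mapping is gone */
                return SEM_DEAD;
        return (r32 & 1) ? SEM_BUSY : SEM_ACQUIRED;
}
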
@@ -1343,7 +1475,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
        int i;
 
        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
-               bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+               bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
                if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
@@ -1369,7 +1501,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
 
        bfa_ioc_fwver_get(ioc, &fwhdr);
        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
-               bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+               bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
        if (fwhdr.signature != drv_fwhdr->signature) {
                bfa_trc(ioc, fwhdr.signature);
@@ -1377,8 +1509,8 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
                return BFA_FALSE;
        }
 
-       if (swab32(fwhdr.param) != boot_env) {
-               bfa_trc(ioc, fwhdr.param);
+       if (swab32(fwhdr.bootenv) != boot_env) {
+               bfa_trc(ioc, fwhdr.bootenv);
                bfa_trc(ioc, boot_env);
                return BFA_FALSE;
        }
@@ -1414,8 +1546,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 
        bfa_trc(ioc, ioc_fwstate);
 
-       boot_type = BFI_BOOT_TYPE_NORMAL;
-       boot_env = BFI_BOOT_LOADER_OS;
+       boot_type = BFI_FWBOOT_TYPE_NORMAL;
+       boot_env = BFI_FWBOOT_ENV_OS;
 
        /*
         * check if firmware is valid
@@ -1425,6 +1557,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 
        if (!fwvalid) {
                bfa_ioc_boot(ioc, boot_type, boot_env);
+               bfa_ioc_poll_fwinit(ioc);
                return;
        }
 
@@ -1433,7 +1566,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
         * just wait for an initialization completion interrupt.
         */
        if (ioc_fwstate == BFI_IOC_INITING) {
-               ioc->cbfn->reset_cbfn(ioc->bfa);
+               bfa_ioc_poll_fwinit(ioc);
                return;
        }
 
@@ -1452,7 +1585,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
                bfa_ioc_msgflush(ioc);
-               ioc->cbfn->reset_cbfn(ioc->bfa);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }
@@ -1461,6 +1593,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
         * Initialize the h/w for any other states.
         */
        bfa_ioc_boot(ioc, boot_type, boot_env);
+       bfa_ioc_poll_fwinit(ioc);
 }
 
 static void
@@ -1508,7 +1641,7 @@ bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
 
        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
-       enable_req.ioc_class = ioc->ioc_mc;
+       enable_req.clscode = cpu_to_be16(ioc->clscode);
        do_gettimeofday(&tv);
        enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
@@ -1572,25 +1705,26 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
        u32 loff = 0;
        u32 chunkno = 0;
        u32 i;
+       u32 asicmode;
 
        /*
         * Initialize LMEM first before code download
         */
        bfa_ioc_lmem_init(ioc);
 
-       bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
-       fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+       bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
+       fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 
        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
        pgoff = PSS_SMEM_PGOFF(loff);
 
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
-       for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+       for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
 
                if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
                        chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
-                       fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+                       fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
                }
 
@@ -1616,11 +1750,15 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
                        ioc->ioc_regs.host_page_num_fn);
 
        /*
-        * Set boot type and boot param at the end.
-       */
-       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
+        * Set boot type and device mode at the end.
+        */
+       asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
+                               ioc->port0_mode, ioc->port1_mode);
+       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
+                       swab32(asicmode));
+       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
                        swab32(boot_type));
-       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
+       bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
                        swab32(boot_env));
 }
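
Note: at the end of download the driver now writes three boot parameters into SMEM rather than two: a device/ASIC mode word in addition to the boot type and boot environment, each byte-swapped with swab32(). Conceptually the block looks like the sketch below; the struct is illustrative only, and the real locations are the BFI_FWBOOT_*_OFF offsets.

#include <stdint.h>

/* Sketch only -- conceptual view of the boot-parameter block in SMEM. */
struct fwboot_params {
        uint32_t devmode;       /* BFI_FWBOOT_DEVMODE(asic_gen, asic_mode,
                                 *                    port0_mode, port1_mode) */
        uint32_t boot_type;     /* BFI_FWBOOT_TYPE_NORMAL or _MEMTEST */
        uint32_t boot_env;      /* BFI_FWBOOT_ENV_OS */
};                              /* each word is swab32()'d before the write */
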
 
@@ -1636,6 +1774,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
        attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
        attr->card_type     = be32_to_cpu(attr->card_type);
        attr->maxfrsize     = be16_to_cpu(attr->maxfrsize);
+       ioc->fcmode     = (attr->port_mode == BFI_PORT_MODE_FC);
 
        bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
 }
@@ -1690,7 +1829,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
  * Cleanup any pending requests.
  */
 static void
-bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
 {
        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
        struct bfa_mbox_cmd_s           *cmd;
@@ -1752,6 +1891,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
        /*
         *  release semaphore.
         */
+       readl(ioc->ioc_regs.ioc_init_sem_reg);
        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 
        bfa_trc(ioc, pgnum);
@@ -1808,6 +1948,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
        /*
         *  release semaphore.
         */
+       readl(ioc->ioc_regs.ioc_init_sem_reg);
        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
        bfa_trc(ioc, pgnum);
        return BFA_STATUS_OK;
@@ -1816,23 +1957,19 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
 static void
 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
 {
-       struct list_head                *qe;
-       struct bfa_ioc_hbfail_notify_s  *notify;
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 
        /*
         * Notify driver and common modules registered for notification.
         */
        ioc->cbfn->hbfail_cbfn(ioc->bfa);
-       list_for_each(qe, &ioc->hb_notify_q) {
-               notify = (struct bfa_ioc_hbfail_notify_s *) qe;
-               notify->cbfn(notify->cbarg);
-       }
+       bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
 
        bfa_ioc_debug_save_ftrc(ioc);
 
        BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
                "Heart Beat of IOC has failed\n");
+       bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
 
 }
 
@@ -1847,6 +1984,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
        BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
                "Running firmware version is incompatible "
                "with the driver version\n");
+       bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
 }
 
 bfa_status_t
@@ -1864,6 +2002,7 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
        /*
         *  release semaphore.
         */
+       readl(ioc->ioc_regs.ioc_init_sem_reg);
        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 
        return BFA_STATUS_OK;
@@ -1876,8 +2015,6 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
 void
 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 {
-       void __iomem *rb;
-
        bfa_ioc_stats(ioc, ioc_boots);
 
        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
@@ -1886,22 +2023,16 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
        /*
         * Initialize IOC state of all functions on a chip reset.
         */
-       rb = ioc->pcidev.pci_bar_kva;
-       if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
-               writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
-               writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
+       if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
+               writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
+               writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
        } else {
-               writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
-               writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
+               writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
+               writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
        }
 
        bfa_ioc_msgflush(ioc);
        bfa_ioc_download_fw(ioc, boot_type, boot_env);
-
-       /*
-        * Enable interrupts just before starting LPU
-        */
-       ioc->cbfn->reset_cbfn(ioc->bfa);
        bfa_ioc_lpu_start(ioc);
 }
 
@@ -1932,13 +2063,17 @@ bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
                (r32 != BFI_IOC_MEMTEST));
 }
 
-void
+bfa_boolean_t
 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
 {
        __be32  *msgp = mbmsg;
        u32     r32;
        int             i;
 
+       r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+       if ((r32 & 1) == 0)
+               return BFA_FALSE;
+
        /*
         * read the MBOX msg
         */
@@ -1954,6 +2089,8 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
         */
        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
        readl(ioc->ioc_regs.lpu_mbox_cmd);
+
+       return BFA_TRUE;
 }
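
Note: bfa_ioc_msgget() now returns a bfa_boolean_t and checks bit 0 of lpu_mbox_cmd first, so a caller can tell whether a mailbox message was actually pending; bfa_ioc_mbox_isr() below is restructured around the same check. A hedged caller-side sketch; drain_one() and the dispatch callback are illustrative, not driver API:

#include "bfa_ioc.h"            /* driver declarations assumed available */

/* Sketch only -- consuming the new boolean contract of bfa_ioc_msgget(). */
static void
drain_one(struct bfa_ioc_s *ioc, void (*dispatch)(struct bfi_mbmsg_s *m))
{
        struct bfi_mbmsg_s m;

        if (bfa_ioc_msgget(ioc, &m))    /* BFA_TRUE only if a msg was queued */
                dispatch(&m);
}
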
 
 void
@@ -1970,11 +2107,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
        case BFI_IOC_I2H_HBEAT:
                break;
 
-       case BFI_IOC_I2H_READY_EVENT:
-               bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
-               break;
-
        case BFI_IOC_I2H_ENABLE_REPLY:
+               ioc->port_mode = ioc->port_mode_cfg =
+                               (enum bfa_mode_s)msg->fw_event.port_mode;
+               ioc->ad_cap_bm = msg->fw_event.cap_bm;
                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
                break;
 
@@ -1986,6 +2122,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
                bfa_ioc_getattr_reply(ioc);
                break;
 
+       case BFI_IOC_I2H_ACQ_ADDR_REPLY:
+               bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
+               break;
+
        default:
                bfa_trc(ioc, msg->mh.msg_id);
                WARN_ON(1);
@@ -2011,7 +2151,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
        ioc->iocpf.ioc  = ioc;
 
        bfa_ioc_mbox_attach(ioc);
-       INIT_LIST_HEAD(&ioc->hb_notify_q);
+       INIT_LIST_HEAD(&ioc->notify_q);
 
        bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
        bfa_fsm_send_event(ioc, IOC_E_RESET);
@@ -2024,6 +2164,7 @@ void
 bfa_ioc_detach(struct bfa_ioc_s *ioc)
 {
        bfa_fsm_send_event(ioc, IOC_E_DETACH);
+       INIT_LIST_HEAD(&ioc->notify_q);
 }
 
 /*
@@ -2033,20 +2174,80 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc)
  */
 void
 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
-                enum bfi_mclass mc)
+               enum bfi_pcifn_class clscode)
 {
-       ioc->ioc_mc     = mc;
+       ioc->clscode    = clscode;
        ioc->pcidev     = *pcidev;
-       ioc->ctdev      = bfa_asic_id_ct(ioc->pcidev.device_id);
-       ioc->cna        = ioc->ctdev && !ioc->fcmode;
+
+       /*
+        * Initialize IOC and device personality
+        */
+       ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
+       ioc->asic_mode  = BFI_ASIC_MODE_FC;
+
+       switch (pcidev->device_id) {
+       case BFA_PCI_DEVICE_ID_FC_8G1P:
+       case BFA_PCI_DEVICE_ID_FC_8G2P:
+               ioc->asic_gen = BFI_ASIC_GEN_CB;
+               ioc->fcmode = BFA_TRUE;
+               ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+               ioc->ad_cap_bm = BFA_CM_HBA;
+               break;
+
+       case BFA_PCI_DEVICE_ID_CT:
+               ioc->asic_gen = BFI_ASIC_GEN_CT;
+               ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+               ioc->asic_mode  = BFI_ASIC_MODE_ETH;
+               ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
+               ioc->ad_cap_bm = BFA_CM_CNA;
+               break;
+
+       case BFA_PCI_DEVICE_ID_CT_FC:
+               ioc->asic_gen = BFI_ASIC_GEN_CT;
+               ioc->fcmode = BFA_TRUE;
+               ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+               ioc->ad_cap_bm = BFA_CM_HBA;
+               break;
+
+       case BFA_PCI_DEVICE_ID_CT2:
+               ioc->asic_gen = BFI_ASIC_GEN_CT2;
+               if (clscode == BFI_PCIFN_CLASS_FC &&
+                   pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
+                       ioc->asic_mode  = BFI_ASIC_MODE_FC16;
+                       ioc->fcmode = BFA_TRUE;
+                       ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+                       ioc->ad_cap_bm = BFA_CM_HBA;
+               } else {
+                       ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+                       ioc->asic_mode  = BFI_ASIC_MODE_ETH;
+                       if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
+                               ioc->port_mode =
+                               ioc->port_mode_cfg = BFA_MODE_CNA;
+                               ioc->ad_cap_bm = BFA_CM_CNA;
+                       } else {
+                               ioc->port_mode =
+                               ioc->port_mode_cfg = BFA_MODE_NIC;
+                               ioc->ad_cap_bm = BFA_CM_NIC;
+                       }
+               }
+               break;
+
+       default:
+               WARN_ON(1);
+       }
 
        /*
         * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
         */
-       if (ioc->ctdev)
-               bfa_ioc_set_ct_hwif(ioc);
-       else
+       if (ioc->asic_gen == BFI_ASIC_GEN_CB)
                bfa_ioc_set_cb_hwif(ioc);
+       else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
+               bfa_ioc_set_ct_hwif(ioc);
+       else {
+               WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
+               bfa_ioc_set_ct2_hwif(ioc);
+               bfa_ioc_ct2_poweron(ioc);
+       }
 
        bfa_ioc_map_port(ioc);
        bfa_ioc_reg_init(ioc);
@@ -2172,36 +2373,38 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
        struct bfi_mbmsg_s              m;
        int                             mc;
 
-       bfa_ioc_msgget(ioc, &m);
+       if (bfa_ioc_msgget(ioc, &m)) {
+               /*
+                * Treat IOC message class as special.
+                */
+               mc = m.mh.msg_class;
+               if (mc == BFI_MC_IOC) {
+                       bfa_ioc_isr(ioc, &m);
+                       return;
+               }
+
+               if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+                       return;
 
-       /*
-        * Treat IOC message class as special.
-        */
-       mc = m.mh.msg_class;
-       if (mc == BFI_MC_IOC) {
-               bfa_ioc_isr(ioc, &m);
-               return;
+               mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
        }
 
-       if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
-               return;
+       bfa_ioc_lpu_read_stat(ioc);
 
-       mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+       /*
+        * Try to send pending mailbox commands
+        */
+       bfa_ioc_mbox_poll(ioc);
 }
 
 void
 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
 {
+       bfa_ioc_stats(ioc, ioc_hbfails);
+       ioc->stats.hb_count = ioc->hb_count;
        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
 }
 
-void
-bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
-{
-       ioc->fcmode  = BFA_TRUE;
-       ioc->port_id = bfa_ioc_pcifn(ioc);
-}
-
 /*
  * return true if IOC is disabled
  */
@@ -2212,6 +2415,15 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 }
 
+/*
+ * Return TRUE if IOC is in acquiring address state
+ */
+bfa_boolean_t
+bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
+{
+       return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
+}
+
 /*
  * return true if IOC firmware is different.
  */
@@ -2239,17 +2451,16 @@ bfa_boolean_t
 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 {
        u32     ioc_state;
-       void __iomem *rb = ioc->pcidev.pci_bar_kva;
 
        if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
                return BFA_FALSE;
 
-       ioc_state = readl(rb + BFA_IOC0_STATE_REG);
+       ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
        if (!bfa_ioc_state_disabled(ioc_state))
                return BFA_FALSE;
 
        if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
-               ioc_state = readl(rb + BFA_IOC1_STATE_REG);
+               ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
                if (!bfa_ioc_state_disabled(ioc_state))
                        return BFA_FALSE;
        }
@@ -2308,24 +2519,21 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 
        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
 
-       ad_attr->cna_capable = ioc->cna;
-       ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
-                               !ad_attr->is_mezz;
+       ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
+       ad_attr->trunk_capable = (ad_attr->nports > 1) &&
+                                 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
 }
 
 enum bfa_ioc_type_e
 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
 {
-       if (!ioc->ctdev || ioc->fcmode)
-               return BFA_IOC_TYPE_FC;
-       else if (ioc->ioc_mc == BFI_MC_IOCFC)
-               return BFA_IOC_TYPE_FCoE;
-       else if (ioc->ioc_mc == BFI_MC_LL)
-               return BFA_IOC_TYPE_LL;
-       else {
-               WARN_ON(ioc->ioc_mc != BFI_MC_LL);
+       if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
                return BFA_IOC_TYPE_LL;
-       }
+
+       WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
+
+       return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
+               ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
 }
 
 void
@@ -2384,11 +2592,8 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
 
        ioc_attr = ioc->attr;
 
-       /*
-        * model name
-        */
        snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
-               BFA_MFG_NAME, ioc_attr->card_type);
+                       BFA_MFG_NAME, ioc_attr->card_type);
 }
 
 enum bfa_ioc_state
@@ -2438,6 +2643,9 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
 
        ioc_attr->state = bfa_ioc_get_state(ioc);
        ioc_attr->port_id = ioc->port_id;
+       ioc_attr->port_mode = ioc->port_mode;
+       ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
+       ioc_attr->cap_bm = ioc->ad_cap_bm;
 
        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
 
@@ -2475,10 +2683,41 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
        return m;
 }
 
-bfa_boolean_t
-bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
+/*
+ * Send AEN notification
+ */
+void
+bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
 {
-       return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
+       struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+       struct bfa_aen_entry_s  *aen_entry;
+       enum bfa_ioc_type_e ioc_type;
+
+       bfad_get_aen_entry(bfad, aen_entry);
+       if (!aen_entry)
+               return;
+
+       ioc_type = bfa_ioc_get_type(ioc);
+       switch (ioc_type) {
+       case BFA_IOC_TYPE_FC:
+               aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+               break;
+       case BFA_IOC_TYPE_FCoE:
+               aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+               aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+               break;
+       case BFA_IOC_TYPE_LL:
+               aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+               break;
+       default:
+               WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
+               break;
+       }
+
+       /* Send the AEN notification */
+       aen_entry->aen_data.ioc.ioc_type = ioc_type;
+       bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+                                 BFA_AEN_CAT_IOC, event);
 }
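
Note: every AEN producer in this file uses the same sequence with the new bfad_bsg plumbing: fetch a free entry with bfad_get_aen_entry(), bail out when none is available, fill in the module-specific aen_data, then post it with a per-IOC sequence number and category via bfad_im_post_vendor_event(). A condensed, hedged restatement of that producer sequence, stripped of the per-IOC-type detail:

#include "bfad_drv.h"
#include "bfad_im.h"    /* bfad_get_aen_entry / bfad_im_post_vendor_event */

/* Sketch only -- the common producer sequence. */
static void
post_aen(struct bfa_ioc_s *ioc, struct bfad_s *bfad,
         enum bfa_ioc_aen_event event)
{
        struct bfa_aen_entry_s *aen_entry;

        bfad_get_aen_entry(bfad, aen_entry);    /* may leave aen_entry NULL */
        if (!aen_entry)
                return;                         /* no free slot: drop event */

        aen_entry->aen_data.ioc.ioc_type = bfa_ioc_get_type(ioc);
        bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
                                  BFA_AEN_CAT_IOC, event);
}
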
 
 /*
@@ -2531,7 +2770,7 @@ bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
 
        bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
                    bfa_ioc_portid(ioc));
-       req->ioc_class = ioc->ioc_mc;
+       req->clscode = cpu_to_be16(ioc->clscode);
        bfa_ioc_mbox_queue(ioc, &cmd);
 }
 
@@ -2673,6 +2912,7 @@ static void
 bfa_ioc_recover(struct bfa_ioc_s *ioc)
 {
        bfa_ioc_stats(ioc, ioc_hbfails);
+       ioc->stats.hb_count = ioc->hb_count;
        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
@@ -2681,6 +2921,10 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
 {
        if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
                return;
+       if (ioc->attr->nwwn == 0)
+               bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
+       if (ioc->attr->pwwn == 0)
+               bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
 }
 
 /*
@@ -2703,6 +2947,34 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
        bfa_ioc_hw_sem_get(ioc);
 }
 
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
+{
+       u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+       bfa_trc(ioc, fwstate);
+
+       if (fwstate == BFI_IOC_DISABLED) {
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+               return;
+       }
+
+       if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
+               bfa_iocpf_timeout(ioc);
+       else {
+               ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+               bfa_iocpf_poll_timer_start(ioc);
+       }
+}
+
+static void
+bfa_iocpf_poll_timeout(void *ioc_arg)
+{
+       struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+       bfa_ioc_poll_fwinit(ioc);
+}
+
 /*
  *  bfa timer function
  */
@@ -2770,3 +3042,2942 @@ bfa_timer_stop(struct bfa_timer_s *timer)
 
        list_del(&timer->qe);
 }
+
+/*
+ *     ASIC block related
+ */
+static void
+bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
+{
+       struct bfa_ablk_cfg_inst_s *cfg_inst;
+       int i, j;
+       u16     be16;
+       u32     be32;
+
+       for (i = 0; i < BFA_ABLK_MAX; i++) {
+               cfg_inst = &cfg->inst[i];
+               for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
+                       be16 = cfg_inst->pf_cfg[j].pers;
+                       cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
+                       be16 = cfg_inst->pf_cfg[j].num_qpairs;
+                       cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
+                       be16 = cfg_inst->pf_cfg[j].num_vectors;
+                       cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
+                       be32 = cfg_inst->pf_cfg[j].bw;
+                       cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
+               }
+       }
+}
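
Note: in the swap routine above the 16-bit fields (pers, num_qpairs, num_vectors) take be16_to_cpu(), while the 32-bit bw field takes be32_to_cpu(). A minimal, hedged illustration of the two conversions using the kernel byteorder helpers:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch only -- the two fix-ups the swap routine relies on. */
static inline u16 ablk_fix16(__be16 v) { return be16_to_cpu(v); }
static inline u32 ablk_fix32(__be32 v) { return be32_to_cpu(v); }
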
+
+static void
+bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
+{
+       struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
+       struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
+       bfa_ablk_cbfn_t cbfn;
+
+       WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
+       bfa_trc(ablk->ioc, msg->mh.msg_id);
+
+       switch (msg->mh.msg_id) {
+       case BFI_ABLK_I2H_QUERY:
+               if (rsp->status == BFA_STATUS_OK) {
+                       memcpy(ablk->cfg, ablk->dma_addr.kva,
+                               sizeof(struct bfa_ablk_cfg_s));
+                       bfa_ablk_config_swap(ablk->cfg);
+                       ablk->cfg = NULL;
+               }
+               break;
+
+       case BFI_ABLK_I2H_ADPT_CONFIG:
+       case BFI_ABLK_I2H_PORT_CONFIG:
+               /* update config port mode */
+               ablk->ioc->port_mode_cfg = rsp->port_mode;
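+               /* !!! fall through !!! */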
+
+       case BFI_ABLK_I2H_PF_DELETE:
+       case BFI_ABLK_I2H_PF_UPDATE:
+       case BFI_ABLK_I2H_OPTROM_ENABLE:
+       case BFI_ABLK_I2H_OPTROM_DISABLE:
+               /* No-op */
+               break;
+
+       case BFI_ABLK_I2H_PF_CREATE:
+               *(ablk->pcifn) = rsp->pcifn;
+               ablk->pcifn = NULL;
+               break;
+
+       default:
+               WARN_ON(1);
+       }
+
+       ablk->busy = BFA_FALSE;
+       if (ablk->cbfn) {
+               cbfn = ablk->cbfn;
+               ablk->cbfn = NULL;
+               cbfn(ablk->cbarg, rsp->status);
+       }
+}
+
+static void
+bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+       struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
+
+       bfa_trc(ablk->ioc, event);
+
+       switch (event) {
+       case BFA_IOC_E_ENABLED:
+               WARN_ON(ablk->busy != BFA_FALSE);
+               break;
+
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               /* Fail any pending requests */
+               ablk->pcifn = NULL;
+               if (ablk->busy) {
+                       if (ablk->cbfn)
+                               ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
+                       ablk->cbfn = NULL;
+                       ablk->busy = BFA_FALSE;
+               }
+               break;
+
+       default:
+               WARN_ON(1);
+               break;
+       }
+}
+
+u32
+bfa_ablk_meminfo(void)
+{
+       return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
+{
+       ablk->dma_addr.kva = dma_kva;
+       ablk->dma_addr.pa  = dma_pa;
+}
+
+void
+bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
+{
+       ablk->ioc = ioc;
+
+       bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
+       bfa_q_qe_init(&ablk->ioc_notify);
+       bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
+       list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
+}
+
+bfa_status_t
+bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
+               bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_ablk_h2i_query_s *m;
+
+       WARN_ON(!ablk_cfg);
+
+       if (!bfa_ioc_is_operational(ablk->ioc)) {
+               bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+
+       if (ablk->busy) {
+               bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+               return  BFA_STATUS_DEVBUSY;
+       }
+
+       ablk->cfg = ablk_cfg;
+       ablk->cbfn  = cbfn;
+       ablk->cbarg = cbarg;
+       ablk->busy  = BFA_TRUE;
+
+       m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
+       bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
+                   bfa_ioc_portid(ablk->ioc));
+       bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
+       bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
+               u8 port, enum bfi_pcifn_class personality, int bw,
+               bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_ablk_h2i_pf_req_s *m;
+
+       if (!bfa_ioc_is_operational(ablk->ioc)) {
+               bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+
+       if (ablk->busy) {
+               bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+               return  BFA_STATUS_DEVBUSY;
+       }
+
+       ablk->pcifn = pcifn;
+       ablk->cbfn = cbfn;
+       ablk->cbarg = cbarg;
+       ablk->busy  = BFA_TRUE;
+
+       m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+       bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
+                   bfa_ioc_portid(ablk->ioc));
+       m->pers = cpu_to_be16((u16)personality);
+       m->bw = cpu_to_be32(bw);
+       m->port = port;
+       bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
+               bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_ablk_h2i_pf_req_s *m;
+
+       if (!bfa_ioc_is_operational(ablk->ioc)) {
+               bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+
+       if (ablk->busy) {
+               bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+               return  BFA_STATUS_DEVBUSY;
+       }
+
+       ablk->cbfn  = cbfn;
+       ablk->cbarg = cbarg;
+       ablk->busy  = BFA_TRUE;
+
+       m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+       bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
+                   bfa_ioc_portid(ablk->ioc));
+       m->pcifn = (u8)pcifn;
+       bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
+               int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_ablk_h2i_cfg_req_s *m;
+
+       if (!bfa_ioc_is_operational(ablk->ioc)) {
+               bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+
+       if (ablk->busy) {
+               bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+               return  BFA_STATUS_DEVBUSY;
+       }
+
+       ablk->cbfn  = cbfn;
+       ablk->cbarg = cbarg;
+       ablk->busy  = BFA_TRUE;
+
+       m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
+       bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
+                   bfa_ioc_portid(ablk->ioc));
+       m->mode = (u8)mode;
+       m->max_pf = (u8)max_pf;
+       m->max_vf = (u8)max_vf;
+       bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
+               int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_ablk_h2i_cfg_req_s *m;
+
+       if (!bfa_ioc_is_operational(ablk->ioc)) {
+               bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+
+       if (ablk->busy) {
+               bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+               return  BFA_STATUS_DEVBUSY;
+       }
+
+       ablk->cbfn  = cbfn;
+       ablk->cbarg = cbarg;
+       ablk->busy  = BFA_TRUE;
+
+       m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
+       bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
+               bfa_ioc_portid(ablk->ioc));
+       m->port = (u8)port;
+       m->mode = (u8)mode;
+       m->max_pf = (u8)max_pf;
+       m->max_vf = (u8)max_vf;
+       bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
+               bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_ablk_h2i_pf_req_s *m;
+
+       if (!bfa_ioc_is_operational(ablk->ioc)) {
+               bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+
+       if (ablk->busy) {
+               bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+               return  BFA_STATUS_DEVBUSY;
+       }
+
+       ablk->cbfn  = cbfn;
+       ablk->cbarg = cbarg;
+       ablk->busy  = BFA_TRUE;
+
+       m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+       bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
+               bfa_ioc_portid(ablk->ioc));
+       m->pcifn = (u8)pcifn;
+       m->bw = cpu_to_be32(bw);
+       bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_ablk_h2i_optrom_s *m;
+
+       if (!bfa_ioc_is_operational(ablk->ioc)) {
+               bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+
+       if (ablk->busy) {
+               bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+               return  BFA_STATUS_DEVBUSY;
+       }
+
+       ablk->cbfn  = cbfn;
+       ablk->cbarg = cbarg;
+       ablk->busy  = BFA_TRUE;
+
+       m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
+       bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
+               bfa_ioc_portid(ablk->ioc));
+       bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+       return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_ablk_h2i_optrom_s *m;
+
+       if (!bfa_ioc_is_operational(ablk->ioc)) {
+               bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+
+       if (ablk->busy) {
+               bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+               return  BFA_STATUS_DEVBUSY;
+       }
+
+       ablk->cbfn  = cbfn;
+       ablk->cbarg = cbarg;
+       ablk->busy  = BFA_TRUE;
+
+       m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
+       bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
+               bfa_ioc_portid(ablk->ioc));
+       bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+       return BFA_STATUS_OK;
+}
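
Note: every bfa_ablk_* request above follows one template: refuse the call if the IOC is not operational, refuse it if another request is still outstanding, record the completion callback, build the opcode-specific BFI message in ablk->mb, and queue it; bfa_ablk_isr() later clears busy and invokes the callback with the firmware status. A condensed, hedged sketch of that template (not driver API):

#include "bfa_ioc.h"            /* driver declarations assumed available */

/* Sketch only -- the guard/queue template shared by the ABLK requests. */
static bfa_status_t
ablk_send(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
        if (!bfa_ioc_is_operational(ablk->ioc))
                return BFA_STATUS_IOC_FAILURE;  /* IOC down: reject */

        if (ablk->busy)
                return BFA_STATUS_DEVBUSY;      /* one outstanding request only */

        ablk->cbfn  = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy  = BFA_TRUE;

        /* ...fill ablk->mb.msg with the opcode-specific BFI message... */
        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
        return BFA_STATUS_OK;
}
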
+
+/*
+ *     SFP module specific
+ */
+
+/* forward declarations */
+static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
+static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
+static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
+                               enum bfa_port_speed portspeed);
+
+static void
+bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
+{
+       bfa_trc(sfp, sfp->lock);
+       if (sfp->cbfn)
+               sfp->cbfn(sfp->cbarg, sfp->status);
+       sfp->lock = 0;
+       sfp->cbfn = NULL;
+}
+
+static void
+bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
+{
+       bfa_trc(sfp, sfp->portspeed);
+       if (sfp->media) {
+               bfa_sfp_media_get(sfp);
+               if (sfp->state_query_cbfn)
+                       sfp->state_query_cbfn(sfp->state_query_cbarg,
+                                       sfp->status);
+               sfp->media = NULL;
+       }
+
+       if (sfp->portspeed) {
+               sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
+               if (sfp->state_query_cbfn)
+                       sfp->state_query_cbfn(sfp->state_query_cbarg,
+                                       sfp->status);
+               sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+       }
+
+       sfp->state_query_lock = 0;
+       sfp->state_query_cbfn = NULL;
+}
+
+/*
+ *     IOC event handler.
+ */
+static void
+bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
+{
+       struct bfa_sfp_s *sfp = sfp_arg;
+
+       bfa_trc(sfp, event);
+       bfa_trc(sfp, sfp->lock);
+       bfa_trc(sfp, sfp->state_query_lock);
+
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (sfp->lock) {
+                       sfp->status = BFA_STATUS_IOC_FAILURE;
+                       bfa_cb_sfp_show(sfp);
+               }
+
+               if (sfp->state_query_lock) {
+                       sfp->status = BFA_STATUS_IOC_FAILURE;
+                       bfa_cb_sfp_state_query(sfp);
+               }
+               break;
+
+       default:
+               break;
+       }
+}
+
+/*
+ * Post SFP State Change Notification (SCN) events to the AEN
+ */
+static void
+bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
+{
+       struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
+       struct bfa_aen_entry_s  *aen_entry;
+       enum bfa_port_aen_event aen_evt = 0;
+
+       bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
+                     ((u64)rsp->event));
+
+       bfad_get_aen_entry(bfad, aen_entry);
+       if (!aen_entry)
+               return;
+
+       aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
+       aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
+       aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
+
+       switch (rsp->event) {
+       case BFA_SFP_SCN_INSERTED:
+               aen_evt = BFA_PORT_AEN_SFP_INSERT;
+               break;
+       case BFA_SFP_SCN_REMOVED:
+               aen_evt = BFA_PORT_AEN_SFP_REMOVE;
+               break;
+       case BFA_SFP_SCN_FAILED:
+               aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
+               break;
+       case BFA_SFP_SCN_UNSUPPORT:
+               aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
+               break;
+       case BFA_SFP_SCN_POM:
+               aen_evt = BFA_PORT_AEN_SFP_POM;
+               aen_entry->aen_data.port.level = rsp->pomlvl;
+               break;
+       default:
+               bfa_trc(sfp, rsp->event);
+               WARN_ON(1);
+       }
+
+       /* Send the AEN notification */
+       bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
+                                 BFA_AEN_CAT_PORT, aen_evt);
+}
+
+/*
+ *     SFP get data send
+ */
+static void
+bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
+{
+       struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+       bfa_trc(sfp, req->memtype);
+
+       /* build host command */
+       bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
+                       bfa_ioc_portid(sfp->ioc));
+
+       /* send mbox cmd */
+       bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
+}
+
+/*
+ *     SFP is valid, read sfp data
+ */
+static void
+bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
+{
+       struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+       WARN_ON(sfp->lock != 0);
+       bfa_trc(sfp, sfp->state);
+
+       sfp->lock = 1;
+       sfp->memtype = memtype;
+       req->memtype = memtype;
+
+       /* Setup SG list */
+       bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
+
+       bfa_sfp_getdata_send(sfp);
+}
+
+/*
+ *     SFP scn handler
+ */
+static void
+bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+       struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
+
+       switch (rsp->event) {
+       case BFA_SFP_SCN_INSERTED:
+               sfp->state = BFA_SFP_STATE_INSERTED;
+               sfp->data_valid = 0;
+               bfa_sfp_scn_aen_post(sfp, rsp);
+               break;
+       case BFA_SFP_SCN_REMOVED:
+               sfp->state = BFA_SFP_STATE_REMOVED;
+               sfp->data_valid = 0;
+               bfa_sfp_scn_aen_post(sfp, rsp);
+               break;
+       case BFA_SFP_SCN_FAILED:
+               sfp->state = BFA_SFP_STATE_FAILED;
+               sfp->data_valid = 0;
+               bfa_sfp_scn_aen_post(sfp, rsp);
+               break;
+       case BFA_SFP_SCN_UNSUPPORT:
+               sfp->state = BFA_SFP_STATE_UNSUPPORT;
+               bfa_sfp_scn_aen_post(sfp, rsp);
+               if (!sfp->lock)
+                       bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+               break;
+       case BFA_SFP_SCN_POM:
+               bfa_sfp_scn_aen_post(sfp, rsp);
+               break;
+       case BFA_SFP_SCN_VALID:
+               sfp->state = BFA_SFP_STATE_VALID;
+               if (!sfp->lock)
+                       bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+               break;
+       default:
+               bfa_trc(sfp, rsp->event);
+               WARN_ON(1);
+       }
+}
+
+/*
+ * SFP show complete
+ */
+static void
+bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+       struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
+
+       if (!sfp->lock) {
+               /*
+                * receiving response after ioc failure
+                */
+               bfa_trc(sfp, sfp->lock);
+               return;
+       }
+
+       bfa_trc(sfp, rsp->status);
+       if (rsp->status == BFA_STATUS_OK) {
+               sfp->data_valid = 1;
+               if (sfp->state == BFA_SFP_STATE_VALID)
+                       sfp->status = BFA_STATUS_OK;
+               else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
+                       sfp->status = BFA_STATUS_SFP_UNSUPP;
+               else
+                       bfa_trc(sfp, sfp->state);
+       } else {
+               sfp->data_valid = 0;
+               sfp->status = rsp->status;
+               /* sfpshow shouldn't change sfp state */
+       }
+
+       bfa_trc(sfp, sfp->memtype);
+       if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
+               bfa_trc(sfp, sfp->data_valid);
+               if (sfp->data_valid) {
+                       u32     size = sizeof(struct sfp_mem_s);
+                       u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
+                       memcpy(des, sfp->dbuf_kva, size);
+               }
+               /*
+                * Queue completion callback.
+                */
+               bfa_cb_sfp_show(sfp);
+       } else
+               sfp->lock = 0;
+
+       bfa_trc(sfp, sfp->state_query_lock);
+       if (sfp->state_query_lock) {
+               sfp->state = rsp->state;
+               /* Complete callback */
+               bfa_cb_sfp_state_query(sfp);
+       }
+}
+
+/*
+ *     SFP query fw sfp state
+ */
+static void
+bfa_sfp_state_query(struct bfa_sfp_s *sfp)
+{
+       struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+       /* Should not be doing query if not in _INIT state */
+       WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
+       WARN_ON(sfp->state_query_lock != 0);
+       bfa_trc(sfp, sfp->state);
+
+       sfp->state_query_lock = 1;
+       req->memtype = 0;
+
+       if (!sfp->lock)
+               bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+}
+
+static void
+bfa_sfp_media_get(struct bfa_sfp_s *sfp)
+{
+       enum bfa_defs_sfp_media_e *media = sfp->media;
+
+       *media = BFA_SFP_MEDIA_UNKNOWN;
+
+       if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
+               *media = BFA_SFP_MEDIA_UNSUPPORT;
+       else if (sfp->state == BFA_SFP_STATE_VALID) {
+               union sfp_xcvr_e10g_code_u e10g;
+               struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
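+               /*
+                * xmtr_tech combines the low two bits of xcvr[4] with the
+                * upper seven bits of xcvr[5] into a single 9-bit
+                * transmitter technology bitmap (assumed SFF-8472 serial
+                * ID layout).
+                */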
+               u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
+                               (sfpmem->srlid_base.xcvr[5] >> 1);
+
+               e10g.b = sfpmem->srlid_base.xcvr[0];
+               bfa_trc(sfp, e10g.b);
+               bfa_trc(sfp, xmtr_tech);
+               /* check fc transmitter tech */
+               if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
+                   (xmtr_tech & SFP_XMTR_TECH_CP) ||
+                   (xmtr_tech & SFP_XMTR_TECH_CA))
+                       *media = BFA_SFP_MEDIA_CU;
+               else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
+                        (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
+                       *media = BFA_SFP_MEDIA_EL;
+               else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
+                        (xmtr_tech & SFP_XMTR_TECH_LC))
+                       *media = BFA_SFP_MEDIA_LW;
+               else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
+                        (xmtr_tech & SFP_XMTR_TECH_SN) ||
+                        (xmtr_tech & SFP_XMTR_TECH_SA))
+                       *media = BFA_SFP_MEDIA_SW;
+               /* Check 10G Ethernet Compliance code */
+               else if (e10g.b & 0x10)
+                       *media = BFA_SFP_MEDIA_SW;
+               else if (e10g.b & 0x60)
+                       *media = BFA_SFP_MEDIA_LW;
+               else if (e10g.r.e10g_unall & 0x80)
+                       *media = BFA_SFP_MEDIA_UNKNOWN;
+               else
+                       bfa_trc(sfp, 0);
+       } else
+               bfa_trc(sfp, sfp->state);
+}
+
+static bfa_status_t
+bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
+{
+       struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
+       struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
+       union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
+       union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
+
+       if (portspeed == BFA_PORT_SPEED_10GBPS) {
+               if (e10g.r.e10g_sr || e10g.r.e10g_lr)
+                       return BFA_STATUS_OK;
+               else {
+                       bfa_trc(sfp, e10g.b);
+                       return BFA_STATUS_UNSUPP_SPEED;
+               }
+       }
+       if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
+           ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
+           ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
+           ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
+           ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
+               return BFA_STATUS_OK;
+       else {
+               bfa_trc(sfp, portspeed);
+               bfa_trc(sfp, fc3.b);
+               bfa_trc(sfp, e10g.b);
+               return BFA_STATUS_UNSUPP_SPEED;
+       }
+}
+
+/*
+ *     SFP hmbox handler
+ */
+void
+bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
+{
+       struct bfa_sfp_s *sfp = sfparg;
+
+       switch (msg->mh.msg_id) {
+       case BFI_SFP_I2H_SHOW:
+               bfa_sfp_show_comp(sfp, msg);
+               break;
+
+       case BFI_SFP_I2H_SCN:
+               bfa_sfp_scn(sfp, msg);
+               break;
+
+       default:
+               bfa_trc(sfp, msg->mh.msg_id);
+               WARN_ON(1);
+       }
+}
+
+/*
+ *     Return DMA memory needed by sfp module.
+ */
+u32
+bfa_sfp_meminfo(void)
+{
+       return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ *     Attach virtual and physical memory for SFP.
+ */
+void
+bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
+               struct bfa_trc_mod_s *trcmod)
+{
+       sfp->dev = dev;
+       sfp->ioc = ioc;
+       sfp->trcmod = trcmod;
+
+       sfp->cbfn = NULL;
+       sfp->cbarg = NULL;
+       sfp->sfpmem = NULL;
+       sfp->lock = 0;
+       sfp->data_valid = 0;
+       sfp->state = BFA_SFP_STATE_INIT;
+       sfp->state_query_lock = 0;
+       sfp->state_query_cbfn = NULL;
+       sfp->state_query_cbarg = NULL;
+       sfp->media = NULL;
+       sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+       sfp->is_elb = BFA_FALSE;
+
+       bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
+       bfa_q_qe_init(&sfp->ioc_notify);
+       bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
+       list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
+}
+
+/*
+ *     Claim Memory for SFP
+ */
+void
+bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
+{
+       sfp->dbuf_kva   = dm_kva;
+       sfp->dbuf_pa    = dm_pa;
+       memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
+
+       dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+       dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Show SFP eeprom content
+ *
+ * @param[in] sfp   - bfa sfp module
+ *
+ * @param[out] sfpmem - sfp eeprom data
+ *
+ */
+bfa_status_t
+bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
+               bfa_cb_sfp_t cbfn, void *cbarg)
+{
+
+       if (!bfa_ioc_is_operational(sfp->ioc)) {
+               bfa_trc(sfp, 0);
+               return BFA_STATUS_IOC_NON_OP;
+       }
+
+       if (sfp->lock) {
+               bfa_trc(sfp, 0);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       sfp->cbfn = cbfn;
+       sfp->cbarg = cbarg;
+       sfp->sfpmem = sfpmem;
+
+       bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
+       return BFA_STATUS_OK;
+}
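+
+/*
+ * Usage sketch (hypothetical caller; names are illustrative only):
+ * register a completion callback of type bfa_cb_sfp_t, e.g.
+ *
+ *     static void drv_sfp_show_done(void *cbarg, bfa_status_t status);
+ *
+ * and call bfa_sfp_show(sfp, &drv->sfpmem, drv_sfp_show_done, drv).
+ * A BFA_STATUS_OK return only means the mailbox request was queued;
+ * the eeprom contents land in sfpmem when the callback later fires
+ * with BFA_STATUS_OK.
+ */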
+
+/*
+ * Return SFP Media type
+ *
+ * @param[in] sfp   - bfa sfp module
+ *
+ * @param[out] media - sfp media type
+ *
+ */
+bfa_status_t
+bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
+               bfa_cb_sfp_t cbfn, void *cbarg)
+{
+       if (!bfa_ioc_is_operational(sfp->ioc)) {
+               bfa_trc(sfp, 0);
+               return BFA_STATUS_IOC_NON_OP;
+       }
+
+       sfp->media = media;
+       if (sfp->state == BFA_SFP_STATE_INIT) {
+               if (sfp->state_query_lock) {
+                       bfa_trc(sfp, 0);
+                       return BFA_STATUS_DEVBUSY;
+               } else {
+                       sfp->state_query_cbfn = cbfn;
+                       sfp->state_query_cbarg = cbarg;
+                       bfa_sfp_state_query(sfp);
+                       return BFA_STATUS_SFP_NOT_READY;
+               }
+       }
+
+       bfa_sfp_media_get(sfp);
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Check if the user-set port speed is allowed by the SFP
+ *
+ * @param[in] sfp   - bfa sfp module
+ * @param[in] portspeed - port speed from user
+ *
+ */
+bfa_status_t
+bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
+               bfa_cb_sfp_t cbfn, void *cbarg)
+{
+       WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
+
+       if (!bfa_ioc_is_operational(sfp->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* For Mezz card, all speeds are allowed */
+       if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
+               return BFA_STATUS_OK;
+
+       /* Check SFP state */
+       sfp->portspeed = portspeed;
+       if (sfp->state == BFA_SFP_STATE_INIT) {
+               if (sfp->state_query_lock) {
+                       bfa_trc(sfp, 0);
+                       return BFA_STATUS_DEVBUSY;
+               } else {
+                       sfp->state_query_cbfn = cbfn;
+                       sfp->state_query_cbarg = cbarg;
+                       bfa_sfp_state_query(sfp);
+                       return BFA_STATUS_SFP_NOT_READY;
+               }
+       }
+
+       if (sfp->state == BFA_SFP_STATE_REMOVED ||
+           sfp->state == BFA_SFP_STATE_FAILED) {
+               bfa_trc(sfp, sfp->state);
+               return BFA_STATUS_NO_SFP_DEV;
+       }
+
+       if (sfp->state == BFA_SFP_STATE_INSERTED) {
+               bfa_trc(sfp, sfp->state);
+               return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
+       }
+
+       /* For eloopback, all speeds are allowed */
+       if (sfp->is_elb)
+               return BFA_STATUS_OK;
+
+       return bfa_sfp_speed_valid(sfp, portspeed);
+}
+
+/*
+ *     Flash module specific
+ */
+
+/*
+ * FLASH DMA buffer should be big enough to hold both MFG block and
+ * asic block (64k) at the same time and also should be 2k aligned to
+ * avoid a write segment crossing a sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ       2048
+#define BFA_FLASH_DMA_BUF_SZ   \
+       BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
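+
+/*
+ * Sizing sketch: BFA_ROUNDUP() pads 0x10000 (the 64k asic block) plus
+ * sizeof(struct bfa_mfg_block_s) up to the next BFA_FLASH_SEG_SZ (2k)
+ * multiple, so DMA write segments stay within 2k sector boundaries.
+ */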
+
+static void
+bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
+                       int inst, int type)
+{
+       struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+       struct bfa_aen_entry_s  *aen_entry;
+
+       bfad_get_aen_entry(bfad, aen_entry);
+       if (!aen_entry)
+               return;
+
+       aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
+       aen_entry->aen_data.audit.partition_inst = inst;
+       aen_entry->aen_data.audit.partition_type = type;
+
+       /* Send the AEN notification */
+       bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+                                 BFA_AEN_CAT_AUDIT, event);
+}
+
+static void
+bfa_flash_cb(struct bfa_flash_s *flash)
+{
+       flash->op_busy = 0;
+       if (flash->cbfn)
+               flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+       struct bfa_flash_s      *flash = cbarg;
+
+       bfa_trc(flash, event);
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (flash->op_busy) {
+                       flash->status = BFA_STATUS_IOC_FAILURE;
+                       flash->cbfn(flash->cbarg, flash->status);
+                       flash->op_busy = 0;
+               }
+               break;
+
+       default:
+               break;
+       }
+}
+
+/*
+ * Send flash attribute query request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_query_send(void *cbarg)
+{
+       struct bfa_flash_s *flash = cbarg;
+       struct bfi_flash_query_req_s *msg =
+                       (struct bfi_flash_query_req_s *) flash->mb.msg;
+
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+               bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
+               flash->dbuf_pa);
+       bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_write_send(struct bfa_flash_s *flash)
+{
+       struct bfi_flash_write_req_s *msg =
+                       (struct bfi_flash_write_req_s *) flash->mb.msg;
+       u32     len;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+       len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+               flash->residue : BFA_FLASH_DMA_BUF_SZ;
+       msg->length = be32_to_cpu(len);
+
+       /* indicate if it's the last msg of the whole write operation */
+       msg->last = (len == flash->residue) ? 1 : 0;
+
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+                       bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+       memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+       bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+
+       flash->residue -= len;
+       flash->offset += len;
+}
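+
+/*
+ * Write flow note: each request transfers at most BFA_FLASH_DMA_BUF_SZ
+ * bytes and advances 'offset' while shrinking 'residue'; the next chunk
+ * is issued from the BFI_FLASH_I2H_WRITE_RSP handler until 'residue'
+ * reaches zero, at which point the caller's callback runs.
+ */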
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+       struct bfa_flash_s *flash = cbarg;
+       struct bfi_flash_read_req_s *msg =
+                       (struct bfi_flash_read_req_s *) flash->mb.msg;
+       u32     len;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+       len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+                       flash->residue : BFA_FLASH_DMA_BUF_SZ;
+       msg->length = be32_to_cpu(len);
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+               bfa_ioc_portid(flash->ioc));
+       bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+       bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Send flash erase request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_erase_send(void *cbarg)
+{
+       struct bfa_flash_s *flash = cbarg;
+       struct bfi_flash_erase_req_s *msg =
+                       (struct bfi_flash_erase_req_s *) flash->mb.msg;
+
+       msg->type = be32_to_cpu(flash->type);
+       msg->instance = flash->instance;
+       bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
+                       bfa_ioc_portid(flash->ioc));
+       bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
+{
+       struct bfa_flash_s *flash = flasharg;
+       u32     status;
+
+       union {
+               struct bfi_flash_query_rsp_s *query;
+               struct bfi_flash_erase_rsp_s *erase;
+               struct bfi_flash_write_rsp_s *write;
+               struct bfi_flash_read_rsp_s *read;
+               struct bfi_flash_event_s *event;
+               struct bfi_mbmsg_s   *msg;
+       } m;
+
+       m.msg = msg;
+       bfa_trc(flash, msg->mh.msg_id);
+
+       if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
+               /* receiving response after ioc failure */
+               bfa_trc(flash, 0x9999);
+               return;
+       }
+
+       switch (msg->mh.msg_id) {
+       case BFI_FLASH_I2H_QUERY_RSP:
+               status = be32_to_cpu(m.query->status);
+               bfa_trc(flash, status);
+               if (status == BFA_STATUS_OK) {
+                       u32     i;
+                       struct bfa_flash_attr_s *attr, *f;
+
+                       attr = (struct bfa_flash_attr_s *) flash->ubuf;
+                       f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
+                       attr->status = be32_to_cpu(f->status);
+                       attr->npart = be32_to_cpu(f->npart);
+                       bfa_trc(flash, attr->status);
+                       bfa_trc(flash, attr->npart);
+                       for (i = 0; i < attr->npart; i++) {
+                               attr->part[i].part_type =
+                                       be32_to_cpu(f->part[i].part_type);
+                               attr->part[i].part_instance =
+                                       be32_to_cpu(f->part[i].part_instance);
+                               attr->part[i].part_off =
+                                       be32_to_cpu(f->part[i].part_off);
+                               attr->part[i].part_size =
+                                       be32_to_cpu(f->part[i].part_size);
+                               attr->part[i].part_len =
+                                       be32_to_cpu(f->part[i].part_len);
+                               attr->part[i].part_status =
+                                       be32_to_cpu(f->part[i].part_status);
+                       }
+               }
+               flash->status = status;
+               bfa_flash_cb(flash);
+               break;
+       case BFI_FLASH_I2H_ERASE_RSP:
+               status = be32_to_cpu(m.erase->status);
+               bfa_trc(flash, status);
+               flash->status = status;
+               bfa_flash_cb(flash);
+               break;
+       case BFI_FLASH_I2H_WRITE_RSP:
+               status = be32_to_cpu(m.write->status);
+               bfa_trc(flash, status);
+               if (status != BFA_STATUS_OK || flash->residue == 0) {
+                       flash->status = status;
+                       bfa_flash_cb(flash);
+               } else {
+                       bfa_trc(flash, flash->offset);
+                       bfa_flash_write_send(flash);
+               }
+               break;
+       case BFI_FLASH_I2H_READ_RSP:
+               status = be32_to_cpu(m.read->status);
+               bfa_trc(flash, status);
+               if (status != BFA_STATUS_OK) {
+                       flash->status = status;
+                       bfa_flash_cb(flash);
+               } else {
+                       u32 len = be32_to_cpu(m.read->length);
+                       bfa_trc(flash, flash->offset);
+                       bfa_trc(flash, len);
+                       memcpy(flash->ubuf + flash->offset,
+                               flash->dbuf_kva, len);
+                       flash->residue -= len;
+                       flash->offset += len;
+                       if (flash->residue == 0) {
+                               flash->status = status;
+                               bfa_flash_cb(flash);
+                       } else
+                               bfa_flash_read_send(flash);
+               }
+               break;
+       case BFI_FLASH_I2H_BOOT_VER_RSP:
+               break;
+       case BFI_FLASH_I2H_EVENT:
+               status = be32_to_cpu(m.event->status);
+               bfa_trc(flash, status);
+               if (status == BFA_STATUS_BAD_FWCFG)
+                       bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
+               else if (status == BFA_STATUS_INVALID_VENDOR) {
+                       u32 param;
+                       param = be32_to_cpu(m.event->param);
+                       bfa_trc(flash, param);
+                       bfa_ioc_aen_post(flash->ioc,
+                               BFA_IOC_AEN_INVALID_VENDOR);
+               }
+               break;
+
+       default:
+               WARN_ON(1);
+       }
+}
+
+/*
+ * Flash memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_flash_meminfo(bfa_boolean_t mincfg)
+{
+       /* min driver doesn't need flash */
+       if (mincfg)
+               return 0;
+       return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
+               struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+       flash->ioc = ioc;
+       flash->trcmod = trcmod;
+       flash->cbfn = NULL;
+       flash->cbarg = NULL;
+       flash->op_busy = 0;
+
+       bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+       bfa_q_qe_init(&flash->ioc_notify);
+       bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+       list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+
+       /* min driver doesn't need flash */
+       if (mincfg) {
+               flash->dbuf_kva = NULL;
+               flash->dbuf_pa = 0;
+       }
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
+               bfa_boolean_t mincfg)
+{
+       if (mincfg)
+               return;
+
+       flash->dbuf_kva = dm_kva;
+       flash->dbuf_pa = dm_pa;
+       memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+       dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+       dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
+               bfa_cb_flash_t cbfn, void *cbarg)
+{
+       bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
+
+       if (!bfa_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (flash->op_busy) {
+               bfa_trc(flash, flash->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->ubuf = (u8 *) attr;
+       bfa_flash_query_send(flash);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Erase flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+               u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
+{
+       bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
+       bfa_trc(flash, type);
+       bfa_trc(flash, instance);
+
+       if (!bfa_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (flash->op_busy) {
+               bfa_trc(flash, flash->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->type = type;
+       flash->instance = instance;
+
+       bfa_flash_erase_send(flash);
+       bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
+                               instance, type);
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+               u8 instance, void *buf, u32 len, u32 offset,
+               bfa_cb_flash_t cbfn, void *cbarg)
+{
+       bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
+       bfa_trc(flash, type);
+       bfa_trc(flash, instance);
+       bfa_trc(flash, len);
+       bfa_trc(flash, offset);
+
+       if (!bfa_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /*
+        * 'len' must be a non-zero multiple of a word (4 bytes)
+        * 'offset' must be on a sector (16KB) boundary
+        */
+       if (!len || (len & 0x03) || (offset & 0x00003FFF))
+               return BFA_STATUS_FLASH_BAD_LEN;
+
+       if (type == BFA_FLASH_PART_MFG)
+               return BFA_STATUS_EINVAL;
+
+       if (flash->op_busy) {
+               bfa_trc(flash, flash->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->type = type;
+       flash->instance = instance;
+       flash->residue = len;
+       flash->offset = 0;
+       flash->addr_off = offset;
+       flash->ubuf = buf;
+
+       bfa_flash_write_send(flash);
+       return BFA_STATUS_OK;
+}
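+
+/*
+ * Usage sketch (hypothetical caller; 'part', 'buf' and 'drv_flash_done'
+ * are illustrative only):
+ *
+ *     bfa_flash_update_part(flash, part, 0, buf, len, off,
+ *                           drv_flash_done, drv);
+ *
+ * succeeds only when len is a non-zero multiple of 4, off sits on a 16k
+ * sector boundary and part is not BFA_FLASH_PART_MFG; anything else is
+ * rejected synchronously before any mailbox traffic.
+ */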
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+               u8 instance, void *buf, u32 len, u32 offset,
+               bfa_cb_flash_t cbfn, void *cbarg)
+{
+       bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
+       bfa_trc(flash, type);
+       bfa_trc(flash, instance);
+       bfa_trc(flash, len);
+       bfa_trc(flash, offset);
+
+       if (!bfa_ioc_is_operational(flash->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /*
+        * 'len' must be a non-zero multiple of a word (4 bytes)
+        * 'offset' must be on a sector (16KB) boundary
+        */
+       if (!len || (len & 0x03) || (offset & 0x00003FFF))
+               return BFA_STATUS_FLASH_BAD_LEN;
+
+       if (flash->op_busy) {
+               bfa_trc(flash, flash->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       flash->op_busy = 1;
+       flash->cbfn = cbfn;
+       flash->cbarg = cbarg;
+       flash->type = type;
+       flash->instance = instance;
+       flash->residue = len;
+       flash->offset = 0;
+       flash->addr_off = offset;
+       flash->ubuf = buf;
+       bfa_flash_read_send(flash);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ *     DIAG module specific
+ */
+
+#define BFA_DIAG_MEMTEST_TOV   50000   /* memtest timeout in msec */
+#define BFA_DIAG_FWPING_TOV    1000    /* msec */
+
+/* IOC event handler */
+static void
+bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
+{
+       struct bfa_diag_s *diag = diag_arg;
+
+       bfa_trc(diag, event);
+       bfa_trc(diag, diag->block);
+       bfa_trc(diag, diag->fwping.lock);
+       bfa_trc(diag, diag->tsensor.lock);
+
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (diag->fwping.lock) {
+                       diag->fwping.status = BFA_STATUS_IOC_FAILURE;
+                       diag->fwping.cbfn(diag->fwping.cbarg,
+                                       diag->fwping.status);
+                       diag->fwping.lock = 0;
+               }
+
+               if (diag->tsensor.lock) {
+                       diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
+                       diag->tsensor.cbfn(diag->tsensor.cbarg,
+                                          diag->tsensor.status);
+                       diag->tsensor.lock = 0;
+               }
+
+               if (diag->block) {
+                       if (diag->timer_active) {
+                               bfa_timer_stop(&diag->timer);
+                               diag->timer_active = 0;
+                       }
+
+                       diag->status = BFA_STATUS_IOC_FAILURE;
+                       diag->cbfn(diag->cbarg, diag->status);
+                       diag->block = 0;
+               }
+               break;
+
+       default:
+               break;
+       }
+}
+
+static void
+bfa_diag_memtest_done(void *cbarg)
+{
+       struct bfa_diag_s *diag = cbarg;
+       struct bfa_ioc_s  *ioc = diag->ioc;
+       struct bfa_diag_memtest_result *res = diag->result;
+       u32     loff = BFI_BOOT_MEMTEST_RES_ADDR;
+       u32     pgnum, pgoff, i;
+
+       pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+       pgoff = PSS_SMEM_PGOFF(loff);
+
+       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+       for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
+                        sizeof(u32)); i++) {
+               /* read test result from smem */
+               *((u32 *) res + i) =
+                       bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+               loff += sizeof(u32);
+       }
+
+       /* Reset IOC fwstates to BFI_IOC_UNINIT */
+       bfa_ioc_reset_fwstate(ioc);
+
+       res->status = swab32(res->status);
+       bfa_trc(diag, res->status);
+
+       if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
+               diag->status = BFA_STATUS_OK;
+       else {
+               diag->status = BFA_STATUS_MEMTEST_FAILED;
+               res->addr = swab32(res->addr);
+               res->exp = swab32(res->exp);
+               res->act = swab32(res->act);
+               res->err_status = swab32(res->err_status);
+               res->err_status1 = swab32(res->err_status1);
+               res->err_addr = swab32(res->err_addr);
+               bfa_trc(diag, res->addr);
+               bfa_trc(diag, res->exp);
+               bfa_trc(diag, res->act);
+               bfa_trc(diag, res->err_status);
+               bfa_trc(diag, res->err_status1);
+               bfa_trc(diag, res->err_addr);
+       }
+       diag->timer_active = 0;
+       diag->cbfn(diag->cbarg, diag->status);
+       diag->block = 0;
+}
+
+/*
+ * Firmware ping
+ */
+
+/*
+ * Perform DMA test directly
+ */
+static void
+diag_fwping_send(struct bfa_diag_s *diag)
+{
+       struct bfi_diag_fwping_req_s *fwping_req;
+       u32     i;
+
+       bfa_trc(diag, diag->fwping.dbuf_pa);
+
+       /* fill DMA area with pattern */
+       for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
+               *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
+
+       /* Fill mbox msg */
+       fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
+
+       /* Setup SG list */
+       bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
+                       diag->fwping.dbuf_pa);
+       /* Set up dma count */
+       fwping_req->count = cpu_to_be32(diag->fwping.count);
+       /* Set up data pattern */
+       fwping_req->data = diag->fwping.data;
+
+       /* build host command */
+       bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
+               bfa_ioc_portid(diag->ioc));
+
+       /* send mbox cmd */
+       bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
+}
+
+static void
+diag_fwping_comp(struct bfa_diag_s *diag,
+                struct bfi_diag_fwping_rsp_s *diag_rsp)
+{
+       u32     rsp_data = diag_rsp->data;
+       u8      rsp_dma_status = diag_rsp->dma_status;
+
+       bfa_trc(diag, rsp_data);
+       bfa_trc(diag, rsp_dma_status);
+
+       if (rsp_dma_status == BFA_STATUS_OK) {
+               u32     i, pat;
+               pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
+                       diag->fwping.data;
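+               /*
+                * expected pattern: the firmware presumably flips the data
+                * on each DMA pass, so an odd ping count should leave the
+                * complemented pattern in the buffer.
+                */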
+               /* Check mbox data */
+               if (diag->fwping.data != rsp_data) {
+                       bfa_trc(diag, rsp_data);
+                       diag->fwping.result->dmastatus =
+                                       BFA_STATUS_DATACORRUPTED;
+                       diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+                       diag->fwping.cbfn(diag->fwping.cbarg,
+                                       diag->fwping.status);
+                       diag->fwping.lock = 0;
+                       return;
+               }
+               /* Check dma pattern */
+               for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
+                       if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
+                               bfa_trc(diag, i);
+                               bfa_trc(diag, pat);
+                               bfa_trc(diag,
+                                       *((u32 *)diag->fwping.dbuf_kva + i));
+                               diag->fwping.result->dmastatus =
+                                               BFA_STATUS_DATACORRUPTED;
+                               diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+                               diag->fwping.cbfn(diag->fwping.cbarg,
+                                               diag->fwping.status);
+                               diag->fwping.lock = 0;
+                               return;
+                       }
+               }
+               diag->fwping.result->dmastatus = BFA_STATUS_OK;
+               diag->fwping.status = BFA_STATUS_OK;
+               diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+               diag->fwping.lock = 0;
+       } else {
+               diag->fwping.status = BFA_STATUS_HDMA_FAILED;
+               diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+               diag->fwping.lock = 0;
+       }
+}
+
+/*
+ * Temperature Sensor
+ */
+
+static void
+diag_tempsensor_send(struct bfa_diag_s *diag)
+{
+       struct bfi_diag_ts_req_s *msg;
+
+       msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
+       bfa_trc(diag, msg->temp);
+       /* build host command */
+       bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
+               bfa_ioc_portid(diag->ioc));
+       /* send mbox cmd */
+       bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
+}
+
+static void
+diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
+{
+       if (!diag->tsensor.lock) {
+               /* receiving response after ioc failure */
+               bfa_trc(diag, diag->tsensor.lock);
+               return;
+       }
+
+       /*
+        * ASIC junction tempsensor is a reg read operation;
+        * it will always return OK
+        */
+       diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
+       diag->tsensor.temp->ts_junc = rsp->ts_junc;
+       diag->tsensor.temp->ts_brd = rsp->ts_brd;
+       diag->tsensor.temp->status = BFA_STATUS_OK;
+
+       if (rsp->ts_brd) {
+               if (rsp->status == BFA_STATUS_OK) {
+                       diag->tsensor.temp->brd_temp =
+                               be16_to_cpu(rsp->brd_temp);
+               } else {
+                       bfa_trc(diag, rsp->status);
+                       diag->tsensor.temp->brd_temp = 0;
+                       diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
+               }
+       }
+       bfa_trc(diag, rsp->ts_junc);
+       bfa_trc(diag, rsp->temp);
+       bfa_trc(diag, rsp->ts_brd);
+       bfa_trc(diag, rsp->brd_temp);
+       diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
+       diag->tsensor.lock = 0;
+}
+
+/*
+ *     LED Test command
+ */
+static void
+diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
+{
+       struct bfi_diag_ledtest_req_s  *msg;
+
+       msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
+       /* build host command */
+       bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
+                       bfa_ioc_portid(diag->ioc));
+
+       /*
+        * convert the freq from N blinks per 10 sec to the
+        * crossbow ontime value. We do it here because division is needed.
+        */
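+       /*
+        * e.g. (illustrative): freq = 10 blinks per 10 sec maps to
+        * 500 / 10 = 50 ontime units; freq = 0, or any freq above 500,
+        * ends up clamped to 1.
+        */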
+       if (ledtest->freq)
+               ledtest->freq = 500 / ledtest->freq;
+
+       if (ledtest->freq == 0)
+               ledtest->freq = 1;
+
+       bfa_trc(diag, ledtest->freq);
+       /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
+       msg->cmd = (u8) ledtest->cmd;
+       msg->color = (u8) ledtest->color;
+       msg->portid = bfa_ioc_portid(diag->ioc);
+       msg->led = ledtest->led;
+       msg->freq = cpu_to_be16(ledtest->freq);
+
+       /* send mbox cmd */
+       bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
+}
+
+static void
+diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
+{
+       bfa_trc(diag, diag->ledtest.lock);
+       diag->ledtest.lock = BFA_FALSE;
+       /* no bfa_cb_queue is needed because driver is not waiting */
+}
+
+/*
+ * Port beaconing
+ */
+static void
+diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
+{
+       struct bfi_diag_portbeacon_req_s *msg;
+
+       msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
+       /* build host command */
+       bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
+               bfa_ioc_portid(diag->ioc));
+       msg->beacon = beacon;
+       msg->period = cpu_to_be32(sec);
+       /* send mbox cmd */
+       bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
+}
+
+static void
+diag_portbeacon_comp(struct bfa_diag_s *diag)
+{
+       bfa_trc(diag, diag->beacon.state);
+       diag->beacon.state = BFA_FALSE;
+       if (diag->cbfn_beacon)
+               diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
+}
+
+/*
+ *     Diag hmbox handler
+ */
+void
+bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
+{
+       struct bfa_diag_s *diag = diagarg;
+
+       switch (msg->mh.msg_id) {
+       case BFI_DIAG_I2H_PORTBEACON:
+               diag_portbeacon_comp(diag);
+               break;
+       case BFI_DIAG_I2H_FWPING:
+               diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
+               break;
+       case BFI_DIAG_I2H_TEMPSENSOR:
+               diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
+               break;
+       case BFI_DIAG_I2H_LEDTEST:
+               diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
+               break;
+       default:
+               bfa_trc(diag, msg->mh.msg_id);
+               WARN_ON(1);
+       }
+}
+
+/*
+ * Gen RAM Test
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] *memtest        - mem test params input from upper layer
+ *   @param[in] pattern         - mem test pattern
+ *   @param[in] *result         - mem test result
+ *   @param[in] cbfn            - mem test callback function
+ *   @param[in] cbarg           - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
+               u32 pattern, struct bfa_diag_memtest_result *result,
+               bfa_cb_diag_t cbfn, void *cbarg)
+{
+       bfa_trc(diag, pattern);
+
+       if (!bfa_ioc_adapter_is_disabled(diag->ioc))
+               return BFA_STATUS_ADAPTER_ENABLED;
+
+       /* check to see if there is another destructive diag cmd running */
+       if (diag->block) {
+               bfa_trc(diag, diag->block);
+               return BFA_STATUS_DEVBUSY;
+       } else
+               diag->block = 1;
+
+       diag->result = result;
+       diag->cbfn = cbfn;
+       diag->cbarg = cbarg;
+
+       /* download memtest code and take LPU0 out of reset */
+       bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
+
+       bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
+                       bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
+       diag->timer_active = 1;
+       return BFA_STATUS_OK;
+}
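+
+/*
+ * Note: memtest is destructive. It can only run while the adapter is
+ * disabled, reboots the IOC with the BFI_FWBOOT_TYPE_MEMTEST image, and
+ * reports back through bfa_diag_memtest_done() once the
+ * BFA_DIAG_MEMTEST_TOV (50 sec) timer expires.
+ */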
+
+/*
+ * DIAG firmware ping command
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] cnt             - dma loop count for testing PCIE
+ *   @param[in] data            - data pattern to pass in fw
+ *   @param[in] *result         - pointer to bfa_diag_fwping_result_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] *cbarg          - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
+               struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
+               void *cbarg)
+{
+       bfa_trc(diag, cnt);
+       bfa_trc(diag, data);
+
+       if (!bfa_ioc_is_operational(diag->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
+           ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
+               return BFA_STATUS_CMD_NOTSUPP;
+
+       /* check to see if there is another destructive diag cmd running */
+       if (diag->block || diag->fwping.lock) {
+               bfa_trc(diag, diag->block);
+               bfa_trc(diag, diag->fwping.lock);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       /* Initialization */
+       diag->fwping.lock = 1;
+       diag->fwping.cbfn = cbfn;
+       diag->fwping.cbarg = cbarg;
+       diag->fwping.result = result;
+       diag->fwping.data = data;
+       diag->fwping.count = cnt;
+
+       /* Init test results */
+       diag->fwping.result->data = 0;
+       diag->fwping.result->status = BFA_STATUS_OK;
+
+       /* kick off the first ping */
+       diag_fwping_send(diag);
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Read Temperature Sensor
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] *result         - pointer to bfa_diag_temp_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] *cbarg          - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+               struct bfa_diag_results_tempsensor_s *result,
+               bfa_cb_diag_t cbfn, void *cbarg)
+{
+       /* check to see if there is a destructive diag cmd running */
+       if (diag->block || diag->tsensor.lock) {
+               bfa_trc(diag, diag->block);
+               bfa_trc(diag, diag->tsensor.lock);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       if (!bfa_ioc_is_operational(diag->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* Init diag mod params */
+       diag->tsensor.lock = 1;
+       diag->tsensor.temp = result;
+       diag->tsensor.cbfn = cbfn;
+       diag->tsensor.cbarg = cbarg;
+
+       /* Send msg to fw */
+       diag_tempsensor_send(diag);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * LED Test command
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] *ledtest        - pointer to ledtest data structure
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
+{
+       bfa_trc(diag, ledtest->cmd);
+
+       if (!bfa_ioc_is_operational(diag->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (diag->beacon.state)
+               return BFA_STATUS_BEACON_ON;
+
+       if (diag->ledtest.lock)
+               return BFA_STATUS_LEDTEST_OP;
+
+       /* Send msg to fw */
+       diag->ledtest.lock = BFA_TRUE;
+       diag_ledtest_send(diag, ledtest);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Port beaconing command
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] beacon          - port beaconing 1:ON   0:OFF
+ *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
+ *   @param[in] sec             - beaconing duration in seconds
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
+               bfa_boolean_t link_e2e_beacon, uint32_t sec)
+{
+       bfa_trc(diag, beacon);
+       bfa_trc(diag, link_e2e_beacon);
+       bfa_trc(diag, sec);
+
+       if (!bfa_ioc_is_operational(diag->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (diag->ledtest.lock)
+               return BFA_STATUS_LEDTEST_OP;
+
+       if (diag->beacon.state && beacon)       /* beacon already on */
+               return BFA_STATUS_BEACON_ON;
+
+       diag->beacon.state      = beacon;
+       diag->beacon.link_e2e   = link_e2e_beacon;
+       if (diag->cbfn_beacon)
+               diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
+
+       /* Send msg to fw */
+       diag_portbeacon_send(diag, beacon, sec);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Return DMA memory needed by diag module.
+ */
+u32
+bfa_diag_meminfo(void)
+{
+       return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ *     Attach virtual and physical memory for Diag.
+ */
+void
+bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+       bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
+{
+       diag->dev = dev;
+       diag->ioc = ioc;
+       diag->trcmod = trcmod;
+
+       diag->block = 0;
+       diag->cbfn = NULL;
+       diag->cbarg = NULL;
+       diag->result = NULL;
+       diag->cbfn_beacon = cbfn_beacon;
+
+       bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
+       bfa_q_qe_init(&diag->ioc_notify);
+       bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
+       list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
+}
+
+void
+bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
+{
+       diag->fwping.dbuf_kva = dm_kva;
+       diag->fwping.dbuf_pa = dm_pa;
+       memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
+}
+
+/*
+ *     PHY module specific
+ */
+#define BFA_PHY_DMA_BUF_SZ     0x02000         /* 8k dma buffer */
+#define BFA_PHY_LOCK_STATUS    0x018878        /* phy semaphore status reg */
+
+static void
+bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
+{
+       int i, m = sz >> 2;
+
+       for (i = 0; i < m; i++)
+               obuf[i] = be32_to_cpu(ibuf[i]);
+}
+
+static bfa_boolean_t
+bfa_phy_present(struct bfa_phy_s *phy)
+{
+       return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
+}
+
+static void
+bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+       struct bfa_phy_s *phy = cbarg;
+
+       bfa_trc(phy, event);
+
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (phy->op_busy) {
+                       phy->status = BFA_STATUS_IOC_FAILURE;
+                       phy->cbfn(phy->cbarg, phy->status);
+                       phy->op_busy = 0;
+               }
+               break;
+
+       default:
+               break;
+       }
+}
+
+/*
+ * Send phy attribute query request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_query_send(void *cbarg)
+{
+       struct bfa_phy_s *phy = cbarg;
+       struct bfi_phy_query_req_s *msg =
+                       (struct bfi_phy_query_req_s *) phy->mb.msg;
+
+       msg->instance = phy->instance;
+       bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
+               bfa_ioc_portid(phy->ioc));
+       bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
+       bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_write_send(void *cbarg)
+{
+       struct bfa_phy_s *phy = cbarg;
+       struct bfi_phy_write_req_s *msg =
+                       (struct bfi_phy_write_req_s *) phy->mb.msg;
+       u32     len;
+       u16     *buf, *dbuf;
+       int     i, sz;
+
+       msg->instance = phy->instance;
+       msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+       len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+                       phy->residue : BFA_PHY_DMA_BUF_SZ;
+       msg->length = cpu_to_be32(len);
+
+       /* indicate if it's the last msg of the whole write operation */
+       msg->last = (len == phy->residue) ? 1 : 0;
+
+       bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
+               bfa_ioc_portid(phy->ioc));
+       bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+
+       buf = (u16 *) (phy->ubuf + phy->offset);
+       dbuf = (u16 *)phy->dbuf_kva;
+       sz = len >> 1;
+       for (i = 0; i < sz; i++)
+               buf[i] = cpu_to_be16(dbuf[i]);
+
+       bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+
+       phy->residue -= len;
+       phy->offset += len;
+}
+
+/*
+ * Send phy read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_read_send(void *cbarg)
+{
+       struct bfa_phy_s *phy = cbarg;
+       struct bfi_phy_read_req_s *msg =
+                       (struct bfi_phy_read_req_s *) phy->mb.msg;
+       u32     len;
+
+       msg->instance = phy->instance;
+       msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+       len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+                       phy->residue : BFA_PHY_DMA_BUF_SZ;
+       msg->length = cpu_to_be32(len);
+       bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
+               bfa_ioc_portid(phy->ioc));
+       bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+       bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy stats request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_stats_send(void *cbarg)
+{
+       struct bfa_phy_s *phy = cbarg;
+       struct bfi_phy_stats_req_s *msg =
+                       (struct bfi_phy_stats_req_s *) phy->mb.msg;
+
+       msg->instance = phy->instance;
+       bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
+               bfa_ioc_portid(phy->ioc));
+       bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
+       bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Phy memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_phy_meminfo(bfa_boolean_t mincfg)
+{
+       /* min driver doesn't need phy */
+       if (mincfg)
+               return 0;
+
+       return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Phy attach API.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
+               struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+       phy->ioc = ioc;
+       phy->trcmod = trcmod;
+       phy->cbfn = NULL;
+       phy->cbarg = NULL;
+       phy->op_busy = 0;
+
+       bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
+       bfa_q_qe_init(&phy->ioc_notify);
+       bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
+       list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
+
+       /* min driver doesn't need phy */
+       if (mincfg) {
+               phy->dbuf_kva = NULL;
+               phy->dbuf_pa = 0;
+       }
+}
+
+/*
+ * Claim memory for phy
+ *
+ * @param[in] phy - phy structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
+               bfa_boolean_t mincfg)
+{
+       if (mincfg)
+               return;
+
+       phy->dbuf_kva = dm_kva;
+       phy->dbuf_pa = dm_pa;
+       memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
+       dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+       dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+bfa_boolean_t
+bfa_phy_busy(struct bfa_ioc_s *ioc)
+{
+       void __iomem    *rb;
+
+       rb = bfa_ioc_bar0(ioc);
+       return readl(rb + BFA_PHY_LOCK_STATUS);
+}
+
+/*
+ * Get phy attribute.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] attr - phy attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+               struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
+{
+       bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
+       bfa_trc(phy, instance);
+
+       if (!bfa_phy_present(phy))
+               return BFA_STATUS_PHY_NOT_PRESENT;
+
+       if (!bfa_ioc_is_operational(phy->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+               bfa_trc(phy, phy->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       phy->op_busy = 1;
+       phy->cbfn = cbfn;
+       phy->cbarg = cbarg;
+       phy->instance = instance;
+       phy->ubuf = (uint8_t *) attr;
+       bfa_phy_query_send(phy);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Get phy stats.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] stats - pointer to phy stats
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+               struct bfa_phy_stats_s *stats,
+               bfa_cb_phy_t cbfn, void *cbarg)
+{
+       bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
+       bfa_trc(phy, instance);
+
+       if (!bfa_phy_present(phy))
+               return BFA_STATUS_PHY_NOT_PRESENT;
+
+       if (!bfa_ioc_is_operational(phy->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+               bfa_trc(phy, phy->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       phy->op_busy = 1;
+       phy->cbfn = cbfn;
+       phy->cbarg = cbarg;
+       phy->instance = instance;
+       phy->ubuf = (u8 *) stats;
+       bfa_phy_stats_send(phy);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Update phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+               void *buf, u32 len, u32 offset,
+               bfa_cb_phy_t cbfn, void *cbarg)
+{
+       bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
+       bfa_trc(phy, instance);
+       bfa_trc(phy, len);
+       bfa_trc(phy, offset);
+
+       if (!bfa_phy_present(phy))
+               return BFA_STATUS_PHY_NOT_PRESENT;
+
+       if (!bfa_ioc_is_operational(phy->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* 'len' must be non-zero and a multiple of the 4-byte word size */
+       if (!len || (len & 0x03))
+               return BFA_STATUS_FAILED;
+
+       if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+               bfa_trc(phy, phy->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       phy->op_busy = 1;
+       phy->cbfn = cbfn;
+       phy->cbarg = cbarg;
+       phy->instance = instance;
+       phy->residue = len;
+       phy->offset = 0;
+       phy->addr_off = offset;
+       phy->ubuf = buf;
+
+       bfa_phy_write_send(phy);
+       return BFA_STATUS_OK;
+}
+
+/*
+ * Read phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+               void *buf, u32 len, u32 offset,
+               bfa_cb_phy_t cbfn, void *cbarg)
+{
+       bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
+       bfa_trc(phy, instance);
+       bfa_trc(phy, len);
+       bfa_trc(phy, offset);
+
+       if (!bfa_phy_present(phy))
+               return BFA_STATUS_PHY_NOT_PRESENT;
+
+       if (!bfa_ioc_is_operational(phy->ioc))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* 'len' must be non-zero and a multiple of the 4-byte word size */
+       if (!len || (len & 0x03))
+               return BFA_STATUS_FAILED;
+
+       if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+               bfa_trc(phy, phy->op_busy);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       phy->op_busy = 1;
+       phy->cbfn = cbfn;
+       phy->cbarg = cbarg;
+       phy->instance = instance;
+       phy->residue = len;
+       phy->offset = 0;
+       phy->addr_off = offset;
+       phy->ubuf = buf;
+       bfa_phy_read_send(phy);
+
+       return BFA_STATUS_OK;
+}
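+
+/*
+ * Usage sketch (illustrative only; the callback and context names below are
+ * hypothetical and not part of this driver). Reads are asynchronous:
+ * BFA_STATUS_OK only means the request was accepted, and the bfa_cb_phy_t
+ * callback reports the final status once every chunk has been transferred.
+ *
+ *     static void my_phy_read_done(void *cbarg, bfa_status_t status)
+ *     {
+ *             struct my_phy_ctx *ctx = cbarg;  // hypothetical context
+ *             ctx->status = status;            // BFA_STATUS_OK on success
+ *             complete(&ctx->comp);            // wake the waiting thread
+ *     }
+ *
+ *     if (bfa_phy_read(BFA_PHY(bfa), instance, buf, len, offset,
+ *                      my_phy_read_done, ctx) == BFA_STATUS_OK)
+ *             wait_for_completion(&ctx->comp);
+ */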
+
+/*
+ * Process phy response messages upon receiving interrupts.
+ *
+ * @param[in] phyarg - phy structure
+ * @param[in] msg - message structure
+ */
+void
+bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
+{
+       struct bfa_phy_s *phy = phyarg;
+       u32     status;
+
+       union {
+               struct bfi_phy_query_rsp_s *query;
+               struct bfi_phy_stats_rsp_s *stats;
+               struct bfi_phy_write_rsp_s *write;
+               struct bfi_phy_read_rsp_s *read;
+               struct bfi_mbmsg_s   *msg;
+       } m;
+
+       m.msg = msg;
+       bfa_trc(phy, msg->mh.msg_id);
+
+       if (!phy->op_busy) {
+               /* receiving response after ioc failure */
+               bfa_trc(phy, 0x9999);
+               return;
+       }
+
+       switch (msg->mh.msg_id) {
+       case BFI_PHY_I2H_QUERY_RSP:
+               status = be32_to_cpu(m.query->status);
+               bfa_trc(phy, status);
+
+               if (status == BFA_STATUS_OK) {
+                       struct bfa_phy_attr_s *attr =
+                               (struct bfa_phy_attr_s *) phy->ubuf;
+                       bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
+                                       sizeof(struct bfa_phy_attr_s));
+                       bfa_trc(phy, attr->status);
+                       bfa_trc(phy, attr->length);
+               }
+
+               phy->status = status;
+               phy->op_busy = 0;
+               if (phy->cbfn)
+                       phy->cbfn(phy->cbarg, phy->status);
+               break;
+       case BFI_PHY_I2H_STATS_RSP:
+               status = be32_to_cpu(m.stats->status);
+               bfa_trc(phy, status);
+
+               if (status == BFA_STATUS_OK) {
+                       struct bfa_phy_stats_s *stats =
+                               (struct bfa_phy_stats_s *) phy->ubuf;
+                       bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
+                               sizeof(struct bfa_phy_stats_s));
+                       bfa_trc(phy, stats->status);
+               }
+
+               phy->status = status;
+               phy->op_busy = 0;
+               if (phy->cbfn)
+                       phy->cbfn(phy->cbarg, phy->status);
+               break;
+       case BFI_PHY_I2H_WRITE_RSP:
+               status = be32_to_cpu(m.write->status);
+               bfa_trc(phy, status);
+
+               if (status != BFA_STATUS_OK || phy->residue == 0) {
+                       phy->status = status;
+                       phy->op_busy = 0;
+                       if (phy->cbfn)
+                               phy->cbfn(phy->cbarg, phy->status);
+               } else {
+                       bfa_trc(phy, phy->offset);
+                       bfa_phy_write_send(phy);
+               }
+               break;
+       case BFI_PHY_I2H_READ_RSP:
+               status = be32_to_cpu(m.read->status);
+               bfa_trc(phy, status);
+
+               if (status != BFA_STATUS_OK) {
+                       phy->status = status;
+                       phy->op_busy = 0;
+                       if (phy->cbfn)
+                               phy->cbfn(phy->cbarg, phy->status);
+               } else {
+                       u32 len = be32_to_cpu(m.read->length);
+                       u16 *buf = (u16 *)(phy->ubuf + phy->offset);
+                       u16 *dbuf = (u16 *)phy->dbuf_kva;
+                       int i, sz = len >> 1;
+
+                       bfa_trc(phy, phy->offset);
+                       bfa_trc(phy, len);
+
+                       for (i = 0; i < sz; i++)
+                               buf[i] = be16_to_cpu(dbuf[i]);
+
+                       phy->residue -= len;
+                       phy->offset += len;
+
+                       if (phy->residue == 0) {
+                               phy->status = status;
+                               phy->op_busy = 0;
+                               if (phy->cbfn)
+                                       phy->cbfn(phy->cbarg, phy->status);
+                       } else
+                               bfa_phy_read_send(phy);
+               }
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
+/*
+ *     DCONF module specific
+ */
+
+BFA_MODULE(dconf);
+
+/*
+ * DCONF state machine events
+ */
+enum bfa_dconf_event {
+       BFA_DCONF_SM_INIT               = 1,    /* dconf Init */
+       BFA_DCONF_SM_FLASH_COMP         = 2,    /* flash read/write completed */
+       BFA_DCONF_SM_WR                 = 3,    /* dconf entries modified */
+       BFA_DCONF_SM_TIMEOUT            = 4,    /* update timer expired */
+       BFA_DCONF_SM_EXIT               = 5,    /* exit dconf module */
+       BFA_DCONF_SM_IOCDISABLE         = 6,    /* IOC disable event */
+};
+
+/* forward declaration of DCONF state machine */
+static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
+                               enum bfa_dconf_event event);
+static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+                               enum bfa_dconf_event event);
+static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
+                               enum bfa_dconf_event event);
+static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
+                               enum bfa_dconf_event event);
+static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
+                               enum bfa_dconf_event event);
+static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+                               enum bfa_dconf_event event);
+static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+                               enum bfa_dconf_event event);
+
+static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
+static void bfa_dconf_timer(void *cbarg);
+static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
+static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
+
+/*
+ * Beginning state of the dconf module; waiting for an event to start.
+ */
+static void
+bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+       bfa_status_t bfa_status;
+       bfa_trc(dconf->bfa, event);
+
+       switch (event) {
+       case BFA_DCONF_SM_INIT:
+               if (dconf->min_cfg) {
+                       bfa_trc(dconf->bfa, dconf->min_cfg);
+                       return;
+               }
+               bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
+               dconf->flashdone = BFA_FALSE;
+               bfa_trc(dconf->bfa, dconf->flashdone);
+               bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
+                                       BFA_FLASH_PART_DRV, dconf->instance,
+                                       dconf->dconf,
+                                       sizeof(struct bfa_dconf_s), 0,
+                                       bfa_dconf_init_cb, dconf->bfa);
+               if (bfa_status != BFA_STATUS_OK) {
+                       bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
+                       bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+                       return;
+               }
+               break;
+       case BFA_DCONF_SM_EXIT:
+               dconf->flashdone = BFA_TRUE;
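+               /* fall through */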
+       case BFA_DCONF_SM_IOCDISABLE:
+       case BFA_DCONF_SM_WR:
+       case BFA_DCONF_SM_FLASH_COMP:
+               break;
+       default:
+               bfa_sm_fault(dconf->bfa, event);
+       }
+}
+
+/*
+ * Read the dconf entries from flash and call back into the driver once done.
+ */
+static void
+bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+                       enum bfa_dconf_event event)
+{
+       bfa_trc(dconf->bfa, event);
+
+       switch (event) {
+       case BFA_DCONF_SM_FLASH_COMP:
+               bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+               break;
+       case BFA_DCONF_SM_TIMEOUT:
+               bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+               break;
+       case BFA_DCONF_SM_EXIT:
+               dconf->flashdone = BFA_TRUE;
+               bfa_trc(dconf->bfa, dconf->flashdone);
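+               /* fall through */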
+       case BFA_DCONF_SM_IOCDISABLE:
+               bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+               break;
+       default:
+               bfa_sm_fault(dconf->bfa, event);
+       }
+}
+
+/*
+ * DCONF module is in ready state; initialization has completed.
+ */
+static void
+bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+       bfa_trc(dconf->bfa, event);
+
+       switch (event) {
+       case BFA_DCONF_SM_WR:
+               bfa_timer_start(dconf->bfa, &dconf->timer,
+                       bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+               bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+               break;
+       case BFA_DCONF_SM_EXIT:
+               dconf->flashdone = BFA_TRUE;
+               bfa_trc(dconf->bfa, dconf->flashdone);
+               bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+               break;
+       case BFA_DCONF_SM_INIT:
+       case BFA_DCONF_SM_IOCDISABLE:
+               break;
+       default:
+               bfa_sm_fault(dconf->bfa, event);
+       }
+}
+
+/*
+ * Dconf entries are dirty; write them back to the flash.
+ */
+static void
+bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+       bfa_trc(dconf->bfa, event);
+
+       switch (event) {
+       case BFA_DCONF_SM_TIMEOUT:
+               bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
+               bfa_dconf_flash_write(dconf);
+               break;
+       case BFA_DCONF_SM_WR:
+               bfa_timer_stop(&dconf->timer);
+               bfa_timer_start(dconf->bfa, &dconf->timer,
+                       bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+               break;
+       case BFA_DCONF_SM_EXIT:
+               bfa_timer_stop(&dconf->timer);
+               bfa_timer_start(dconf->bfa, &dconf->timer,
+                       bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+               bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+               bfa_dconf_flash_write(dconf);
+               break;
+       case BFA_DCONF_SM_FLASH_COMP:
+               break;
+       case BFA_DCONF_SM_IOCDISABLE:
+               bfa_timer_stop(&dconf->timer);
+               bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+               break;
+       default:
+               bfa_sm_fault(dconf->bfa, event);
+       }
+}
+
+/*
+ * Final sync of the dconf entries to flash before the module exits.
+ */
+static void
+bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+                       enum bfa_dconf_event event)
+{
+       bfa_trc(dconf->bfa, event);
+
+       switch (event) {
+       case BFA_DCONF_SM_IOCDISABLE:
+       case BFA_DCONF_SM_FLASH_COMP:
+               bfa_timer_stop(&dconf->timer);
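+               /* fall through */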
+       case BFA_DCONF_SM_TIMEOUT:
+               bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+               dconf->flashdone = BFA_TRUE;
+               bfa_trc(dconf->bfa, dconf->flashdone);
+               bfa_ioc_disable(&dconf->bfa->ioc);
+               break;
+       default:
+               bfa_sm_fault(dconf->bfa, event);
+       }
+}
+
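+/*
+ * A flash write issued from the dirty state is in progress; wait for it
+ * to complete.
+ */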
+static void
+bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+       bfa_trc(dconf->bfa, event);
+
+       switch (event) {
+       case BFA_DCONF_SM_FLASH_COMP:
+               bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+               break;
+       case BFA_DCONF_SM_WR:
+               bfa_timer_start(dconf->bfa, &dconf->timer,
+                       bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+               bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+               break;
+       case BFA_DCONF_SM_EXIT:
+               bfa_timer_start(dconf->bfa, &dconf->timer,
+                       bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+               bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+               break;
+       case BFA_DCONF_SM_IOCDISABLE:
+               bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+               break;
+       default:
+               bfa_sm_fault(dconf->bfa, event);
+       }
+}
+
+static void
+bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+                       enum bfa_dconf_event event)
+{
+       bfa_trc(dconf->bfa, event);
+
+       switch (event) {
+       case BFA_DCONF_SM_INIT:
+               bfa_timer_start(dconf->bfa, &dconf->timer,
+                       bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+               bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+               break;
+       case BFA_DCONF_SM_EXIT:
+               dconf->flashdone = BFA_TRUE;
+               bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+               break;
+       case BFA_DCONF_SM_IOCDISABLE:
+               break;
+       default:
+               bfa_sm_fault(dconf->bfa, event);
+       }
+}
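+
+/*
+ * Typical event flow through the state machine above (derived from the
+ * handlers; the write delay is BFA_DCONF_UPDATE_TOV msec):
+ *
+ *   uninit --INIT--> flash_read --FLASH_COMP--> ready
+ *   ready --WR--> dirty --TIMEOUT--> sync --FLASH_COMP--> ready
+ *   dirty/sync --EXIT--> final_sync --FLASH_COMP/TIMEOUT--> uninit
+ *   dirty/sync --IOCDISABLE--> iocdown_dirty (write deferred until re-INIT)
+ */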
+
+/*
+ * Compute the memory needed by the DCONF (driver config) module.
+ */
+static void
+bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+                 struct bfa_s *bfa)
+{
+       struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
+
+       if (cfg->drvcfg.min_cfg)
+               bfa_mem_kva_setup(meminfo, dconf_kva,
+                               sizeof(struct bfa_dconf_hdr_s));
+       else
+               bfa_mem_kva_setup(meminfo, dconf_kva,
+                               sizeof(struct bfa_dconf_s));
+}
+
+static void
+bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+               struct bfa_pcidev_s *pcidev)
+{
+       struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+       dconf->bfad = bfad;
+       dconf->bfa = bfa;
+       dconf->instance = bfa->ioc.port_id;
+       bfa_trc(bfa, dconf->instance);
+
+       dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
+       if (cfg->drvcfg.min_cfg) {
+               bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
+               dconf->min_cfg = BFA_TRUE;
+               /*
+                * Set the flashdone flag to TRUE explicitly as no flash
+                * write will happen in min_cfg mode.
+                */
+               dconf->flashdone = BFA_TRUE;
+       } else {
+               dconf->min_cfg = BFA_FALSE;
+               bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
+       }
+
+       bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
+       bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+}
+
+static void
+bfa_dconf_init_cb(void *arg, bfa_status_t status)
+{
+       struct bfa_s *bfa = arg;
+       struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+       dconf->flashdone = BFA_TRUE;
+       bfa_trc(bfa, dconf->flashdone);
+       bfa_iocfc_cb_dconf_modinit(bfa, status);
+       if (status == BFA_STATUS_OK) {
+               bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
+               if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
+                       dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
+               if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
+                       dconf->dconf->hdr.version = BFI_DCONF_VERSION;
+       }
+       bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modinit(struct bfa_s *bfa)
+{
+       struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+       bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
+}
+
+static void
+bfa_dconf_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_dconf_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_dconf_timer(void *cbarg)
+{
+       struct bfa_dconf_mod_s *dconf = cbarg;
+       bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
+}
+
+static void
+bfa_dconf_iocdisable(struct bfa_s *bfa)
+{
+       struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+       bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
+}
+
+static void
+bfa_dconf_detach(struct bfa_s *bfa)
+{
+}
+
+static bfa_status_t
+bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
+{
+       bfa_status_t bfa_status;
+       bfa_trc(dconf->bfa, 0);
+
+       bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
+                               BFA_FLASH_PART_DRV, dconf->instance,
+                               dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
+                               bfa_dconf_cbfn, dconf);
+       if (bfa_status != BFA_STATUS_OK)
+               WARN_ON(bfa_status);
+       bfa_trc(dconf->bfa, bfa_status);
+
+       return bfa_status;
+}
+
+bfa_status_t
+bfa_dconf_update(struct bfa_s *bfa)
+{
+       struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+       bfa_trc(dconf->bfa, 0);
+       if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
+               return BFA_STATUS_FAILED;
+
+       if (dconf->min_cfg) {
+               bfa_trc(dconf->bfa, dconf->min_cfg);
+               return BFA_STATUS_FAILED;
+       }
+
+       bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
+       return BFA_STATUS_OK;
+}
+
+static void
+bfa_dconf_cbfn(void *arg, bfa_status_t status)
+{
+       struct bfa_dconf_mod_s *dconf = arg;
+       WARN_ON(status);
+       bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modexit(struct bfa_s *bfa)
+{
+       struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+       BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
+       bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
+       bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
+}
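+
+/*
+ * Lifecycle sketch (illustrative; the driver code that invokes these entry
+ * points is not shown here):
+ *
+ *     bfa_dconf_modinit(bfa);         // read saved config from flash
+ *     ...
+ *     bfa_dconf_update(bfa);          // mark entries dirty; they are
+ *                                     // flushed after BFA_DCONF_UPDATE_TOV
+ *     ...
+ *     bfa_dconf_modexit(bfa);         // final flush, then IOC disable
+ */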
index c85182a704fb5d8adaebc6e11b2d0279c8fd0c0e..546d46b371017102136b0d5b87d49347b424755c 100644 (file)
@@ -84,6 +84,68 @@ struct bfa_sge_s {
 #define bfa_sgaddr_le(_x)      (_x)
 #endif
 
+/*
+ * BFA memory resources
+ */
+struct bfa_mem_dma_s {
+       struct list_head qe;            /* Queue of DMA elements */
+       u32             mem_len;        /* Total Length in Bytes */
+       u8              *kva;           /* kernel virtual address */
+       u64             dma;            /* dma address if DMA memory */
+       u8              *kva_curp;      /* kva allocation cursor */
+       u64             dma_curp;       /* dma allocation cursor */
+};
+#define bfa_mem_dma_t struct bfa_mem_dma_s
+
+struct bfa_mem_kva_s {
+       struct list_head qe;            /* Queue of KVA elements */
+       u32             mem_len;        /* Total Length in Bytes */
+       u8              *kva;           /* kernel virtual address */
+       u8              *kva_curp;      /* kva allocation cursor */
+};
+#define bfa_mem_kva_t struct bfa_mem_kva_s
+
+struct bfa_meminfo_s {
+       struct bfa_mem_dma_s dma_info;
+       struct bfa_mem_kva_s kva_info;
+};
+
+/* BFA memory segment setup macros */
+#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do {     \
+       ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz);      \
+       if (_seg_sz)                                            \
+               list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe,  \
+                             &(_meminfo)->dma_info.qe);        \
+} while (0)
+
+#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do {    \
+       ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz);     \
+       if (_seg_sz)                                            \
+               list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
+                             &(_meminfo)->kva_info.qe);        \
+} while (0)
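+
+/*
+ * Example (illustrative, modeled on bfa_dconf_meminfo()): a module's
+ * meminfo handler registers the KVA and DMA segments it needs; a segment
+ * with a zero size is simply not queued.
+ *
+ *     bfa_mem_kva_setup(meminfo, BFA_MEM_DCONF_KVA(bfa),
+ *                       sizeof(struct bfa_dconf_s));
+ *     bfa_mem_dma_setup(meminfo, BFA_MEM_PHY_DMA(bfa),
+ *                       bfa_phy_meminfo(mincfg));
+ */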
+
+/* BFA dma memory segments iterator */
+#define bfa_mem_dma_sptr(_mod, _i)     (&(_mod)->dma_seg[(_i)])
+#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i)                     \
+       for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr);    \
+            _i++, _sptr = bfa_mem_dma_sptr(_mod, _i))
+
+#define bfa_mem_kva_curp(_mod) ((_mod)->kva_seg.kva_curp)
+#define bfa_mem_dma_virt(_sptr)        ((_sptr)->kva_curp)
+#define bfa_mem_dma_phys(_sptr)        ((_sptr)->dma_curp)
+#define bfa_mem_dma_len(_sptr) ((_sptr)->mem_len)
+
+/* Get the corresponding dma buf kva for a req - from the tag */
+#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz)                            \
+       (((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\
+        BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
+/* Get the corresponding dma buf pa for a req - from the tag */
+#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz)                       \
+       ((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp +  \
+        BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
 /*
  * PCI device information required by IOC
  */
@@ -91,6 +153,7 @@ struct bfa_pcidev_s {
        int             pci_slot;
        u8              pci_func;
        u16             device_id;
+       u16             ssid;
        void __iomem    *pci_bar_kva;
 };
 
@@ -112,18 +175,6 @@ struct bfa_dma_s {
 #define BFI_SMEM_CB_SIZE       0x200000U       /* ! 2MB for crossbow   */
 #define BFI_SMEM_CT_SIZE       0x280000U       /* ! 2.5MB for catapult */
 
-
-#define bfa_dma_addr_set(dma_addr, pa) \
-               __bfa_dma_addr_set(&dma_addr, (u64)pa)
-
-static inline void
-__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
-{
-       dma_addr->a32.addr_lo = (__be32) pa;
-       dma_addr->a32.addr_hi = (__be32) (pa >> 32);
-}
-
-
 #define bfa_dma_be_addr_set(dma_addr, pa)      \
                __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
 static inline void
@@ -133,11 +184,22 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
        dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
 }
 
+#define bfa_alen_set(__alen, __len, __pa)      \
+       __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa)
+{
+       alen->al_len = cpu_to_be32(len);
+       bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
 struct bfa_ioc_regs_s {
        void __iomem *hfn_mbox_cmd;
        void __iomem *hfn_mbox;
        void __iomem *lpu_mbox_cmd;
        void __iomem *lpu_mbox;
+       void __iomem *lpu_read_stat;
        void __iomem *pss_ctl_reg;
        void __iomem *pss_err_status_reg;
        void __iomem *app_pll_fast_ctl_reg;
@@ -199,18 +261,26 @@ struct bfa_ioc_cbfn_s {
 };
 
 /*
- * Heartbeat failure notification queue element.
+ * IOC event notification mechanism.
  */
-struct bfa_ioc_hbfail_notify_s {
+enum bfa_ioc_event_e {
+       BFA_IOC_E_ENABLED       = 1,
+       BFA_IOC_E_DISABLED      = 2,
+       BFA_IOC_E_FAILED        = 3,
+};
+
+typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e);
+
+struct bfa_ioc_notify_s {
        struct list_head                qe;
-       bfa_ioc_hbfail_cbfn_t   cbfn;
+       bfa_ioc_notify_cbfn_t   cbfn;
        void                    *cbarg;
 };
 
 /*
- * Initialize a heartbeat failure notification structure
+ * Initialize an IOC event notification structure
  */
-#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do {    \
+#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do {    \
        (__notify)->cbfn = (__cbfn);      \
        (__notify)->cbarg = (__cbarg);      \
 } while (0)
@@ -218,8 +288,9 @@ struct bfa_ioc_hbfail_notify_s {
 struct bfa_iocpf_s {
        bfa_fsm_t               fsm;
        struct bfa_ioc_s        *ioc;
-       u32             retry_count;
+       bfa_boolean_t           fw_mismatch_notified;
        bfa_boolean_t           auto_recover;
+       u32                     poll_time;
 };
 
 struct bfa_ioc_s {
@@ -231,17 +302,15 @@ struct bfa_ioc_s {
        struct bfa_timer_s      sem_timer;
        struct bfa_timer_s      hb_timer;
        u32             hb_count;
-       struct list_head                hb_notify_q;
+       struct list_head        notify_q;
        void                    *dbg_fwsave;
        int                     dbg_fwsave_len;
        bfa_boolean_t           dbg_fwsave_once;
-       enum bfi_mclass         ioc_mc;
+       enum bfi_pcifn_class    clscode;
        struct bfa_ioc_regs_s   ioc_regs;
        struct bfa_trc_mod_s    *trcmod;
        struct bfa_ioc_drv_stats_s      stats;
        bfa_boolean_t           fcmode;
-       bfa_boolean_t           ctdev;
-       bfa_boolean_t           cna;
        bfa_boolean_t           pllinit;
        bfa_boolean_t           stats_busy;     /*  outstanding stats */
        u8                      port_id;
@@ -251,10 +320,18 @@ struct bfa_ioc_s {
        struct bfa_ioc_mbox_mod_s mbox_mod;
        struct bfa_ioc_hwif_s   *ioc_hwif;
        struct bfa_iocpf_s      iocpf;
+       enum bfi_asic_gen       asic_gen;
+       enum bfi_asic_mode      asic_mode;
+       enum bfi_port_mode      port0_mode;
+       enum bfi_port_mode      port1_mode;
+       enum bfa_mode_s         port_mode;
+       u8                      ad_cap_bm;      /* adapter cap bit mask */
+       u8                      port_mode_cfg;  /* config port mode */
+       int                     ioc_aen_seq;
 };
 
 struct bfa_ioc_hwif_s {
-       bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode);
+       bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
        bfa_boolean_t   (*ioc_firmware_lock)    (struct bfa_ioc_s *ioc);
        void            (*ioc_firmware_unlock)  (struct bfa_ioc_s *ioc);
        void            (*ioc_reg_init) (struct bfa_ioc_s *ioc);
@@ -268,12 +345,400 @@ struct bfa_ioc_hwif_s {
        void            (*ioc_sync_leave)       (struct bfa_ioc_s *ioc);
        void            (*ioc_sync_ack)         (struct bfa_ioc_s *ioc);
        bfa_boolean_t   (*ioc_sync_complete)    (struct bfa_ioc_s *ioc);
+       bfa_boolean_t   (*ioc_lpu_read_stat)    (struct bfa_ioc_s *ioc);
+};
+
+/*
+ * Queue element to wait for room in request queue. FIFO order is
+ * maintained when fulfilling requests.
+ */
+struct bfa_reqq_wait_s {
+       struct list_head        qe;
+       void    (*qresume) (void *cbarg);
+       void    *cbarg;
+};
+
+typedef void   (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+
+/*
+ * Generic BFA callback element.
+ */
+struct bfa_cb_qe_s {
+       struct list_head        qe;
+       bfa_cb_cbfn_t   cbfn;
+       bfa_boolean_t   once;
+       bfa_boolean_t   pre_rmv;        /* set for stack based qe(s) */
+       bfa_status_t    fw_status;      /* to access fw status in comp proc */
+       void            *cbarg;
+};
+
+/*
+ * ASIC block configuration related
+ */
+
+typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status);
+
+struct bfa_ablk_s {
+       struct bfa_ioc_s        *ioc;
+       struct bfa_ablk_cfg_s   *cfg;
+       u16                     *pcifn;
+       struct bfa_dma_s        dma_addr;
+       bfa_boolean_t           busy;
+       struct bfa_mbox_cmd_s   mb;
+       bfa_ablk_cbfn_t         cbfn;
+       void                    *cbarg;
+       struct bfa_ioc_notify_s ioc_notify;
+       struct bfa_mem_dma_s    ablk_dma;
+};
+#define BFA_MEM_ABLK_DMA(__bfa)                (&((__bfa)->modules.ablk.ablk_dma))
+
+/*
+ *     SFP module specific
+ */
+typedef void   (*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_sfp_s {
+       void    *dev;
+       struct bfa_ioc_s        *ioc;
+       struct bfa_trc_mod_s    *trcmod;
+       struct sfp_mem_s        *sfpmem;
+       bfa_cb_sfp_t            cbfn;
+       void                    *cbarg;
+       enum bfi_sfp_mem_e      memtype; /* mem access type   */
+       u32                     status;
+       struct bfa_mbox_cmd_s   mbcmd;
+       u8                      *dbuf_kva; /* dma buf virtual address */
+       u64                     dbuf_pa;   /* dma buf physical address */
+       struct bfa_ioc_notify_s ioc_notify;
+       enum bfa_defs_sfp_media_e *media;
+       enum bfa_port_speed     portspeed;
+       bfa_cb_sfp_t            state_query_cbfn;
+       void                    *state_query_cbarg;
+       u8                      lock;
+       u8                      data_valid; /* data in dbuf is valid */
+       u8                      state;      /* sfp state  */
+       u8                      state_query_lock;
+       struct bfa_mem_dma_s    sfp_dma;
+       u8                      is_elb;     /* eloopback  */
+};
+
+#define BFA_SFP_MOD(__bfa)     (&(__bfa)->modules.sfp)
+#define BFA_MEM_SFP_DMA(__bfa) (&(BFA_SFP_MOD(__bfa)->sfp_dma))
+
+u32    bfa_sfp_meminfo(void);
+
+void   bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc,
+                       void *dev, struct bfa_trc_mod_s *trcmod);
+
+void   bfa_sfp_memclaim(struct bfa_sfp_s *diag, u8 *dm_kva, u64 dm_pa);
+void   bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg);
+
+bfa_status_t   bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
+                            bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t   bfa_sfp_media(struct bfa_sfp_s *sfp,
+                       enum bfa_defs_sfp_media_e *media,
+                       bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t   bfa_sfp_speed(struct bfa_sfp_s *sfp,
+                       enum bfa_port_speed portspeed,
+                       bfa_cb_sfp_t cbfn, void *cbarg);
+
+/*
+ *     Flash module specific
+ */
+typedef void   (*bfa_cb_flash_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_flash_s {
+       struct bfa_ioc_s *ioc;          /* back pointer to ioc */
+       struct bfa_trc_mod_s *trcmod;
+       u32             type;           /* partition type */
+       u8              instance;       /* partition instance */
+       u8              rsv[3];
+       u32             op_busy;        /*  operation busy flag */
+       u32             residue;        /*  residual length */
+       u32             offset;         /*  offset */
+       bfa_status_t    status;         /*  status */
+       u8              *dbuf_kva;      /*  dma buf virtual address */
+       u64             dbuf_pa;        /*  dma buf physical address */
+       struct bfa_reqq_wait_s  reqq_wait; /*  to wait for room in reqq */
+       bfa_cb_flash_t  cbfn;           /*  user callback function */
+       void            *cbarg;         /*  user callback arg */
+       u8              *ubuf;          /*  user supplied buffer */
+       struct bfa_cb_qe_s      hcb_qe; /*  comp: BFA callback qelem */
+       u32             addr_off;       /*  partition address offset */
+       struct bfa_mbox_cmd_s   mb;       /*  mailbox */
+       struct bfa_ioc_notify_s ioc_notify; /*  ioc event notify */
+       struct bfa_mem_dma_s    flash_dma;
+};
+
+#define BFA_FLASH(__bfa)               (&(__bfa)->modules.flash)
+#define BFA_MEM_FLASH_DMA(__bfa)       (&(BFA_FLASH(__bfa)->flash_dma))
+
+bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash,
+                       struct bfa_flash_attr_s *attr,
+                       bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash,
+                       enum bfa_flash_part_type type, u8 instance,
+                       bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash,
+                       enum bfa_flash_part_type type, u8 instance,
+                       void *buf, u32 len, u32 offset,
+                       bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash,
+                       enum bfa_flash_part_type type, u8 instance, void *buf,
+                       u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg);
+u32    bfa_flash_meminfo(bfa_boolean_t mincfg);
+void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
+               void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_flash_memclaim(struct bfa_flash_s *flash,
+               u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+
+/*
+ *     DIAG module specific
+ */
+
+typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status);
+typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon,
+                       bfa_boolean_t link_e2e_beacon);
+
+/*
+ *      Firmware ping test results
+ */
+struct bfa_diag_results_fwping {
+       u32     data;   /* store the corrupted data */
+       u32     status;
+       u32     dmastatus;
+       u8      rsvd[4];
+};
+
+struct bfa_diag_qtest_result_s {
+       u32     status;
+       u16     count;  /* successful queue test count */
+       u8      queue;
+       u8      rsvd;   /* 64-bit align */
+};
+
+/*
+ * Firmware ping test state
+ */
+struct bfa_diag_fwping_s {
+       struct bfa_diag_results_fwping *result;
+       bfa_cb_diag_t  cbfn;
+       void            *cbarg;
+       u32             data;
+       u8              lock;
+       u8              rsv[3];
+       u32             status;
+       u32             count;
+       struct bfa_mbox_cmd_s   mbcmd;
+       u8              *dbuf_kva;      /* dma buf virtual address */
+       u64             dbuf_pa;        /* dma buf physical address */
+};
+
+/*
+ *      Temperature sensor query results
+ */
+struct bfa_diag_results_tempsensor_s {
+       u32     status;
+       u16     temp;           /* 10-bit A/D value */
+       u16     brd_temp;       /* 9-bit board temp */
+       u8      ts_junc;        /* show junction tempsensor   */
+       u8      ts_brd;         /* show board tempsensor      */
+       u8      rsvd[6];        /* keep 8-byte alignment      */
+};
+
+struct bfa_diag_tsensor_s {
+       bfa_cb_diag_t   cbfn;
+       void            *cbarg;
+       struct bfa_diag_results_tempsensor_s *temp;
+       u8              lock;
+       u8              rsv[3];
+       u32             status;
+       struct bfa_mbox_cmd_s   mbcmd;
+};
+
+struct bfa_diag_sfpshow_s {
+       struct sfp_mem_s        *sfpmem;
+       bfa_cb_diag_t           cbfn;
+       void                    *cbarg;
+       u8      lock;
+       u8      static_data;
+       u8      rsv[2];
+       u32     status;
+       struct bfa_mbox_cmd_s    mbcmd;
+       u8      *dbuf_kva;      /* dma buf virtual address */
+       u64     dbuf_pa;        /* dma buf physical address */
+};
+
+struct bfa_diag_led_s {
+       struct bfa_mbox_cmd_s   mbcmd;
+       bfa_boolean_t   lock;   /* 1: ledtest is operating */
+};
+
+struct bfa_diag_beacon_s {
+       struct bfa_mbox_cmd_s   mbcmd;
+       bfa_boolean_t   state;          /* port beacon state */
+       bfa_boolean_t   link_e2e;       /* link beacon state */
+};
+
+struct bfa_diag_s {
+       void    *dev;
+       struct bfa_ioc_s                *ioc;
+       struct bfa_trc_mod_s            *trcmod;
+       struct bfa_diag_fwping_s        fwping;
+       struct bfa_diag_tsensor_s       tsensor;
+       struct bfa_diag_sfpshow_s       sfpshow;
+       struct bfa_diag_led_s           ledtest;
+       struct bfa_diag_beacon_s        beacon;
+       void    *result;
+       struct bfa_timer_s timer;
+       bfa_cb_diag_beacon_t  cbfn_beacon;
+       bfa_cb_diag_t  cbfn;
+       void            *cbarg;
+       u8              block;
+       u8              timer_active;
+       u8              rsvd[2];
+       u32             status;
+       struct bfa_ioc_notify_s ioc_notify;
+       struct bfa_mem_dma_s    diag_dma;
 };
 
+#define BFA_DIAG_MOD(__bfa)     (&(__bfa)->modules.diag_mod)
+#define BFA_MEM_DIAG_DMA(__bfa) (&(BFA_DIAG_MOD(__bfa)->diag_dma))
+
+u32    bfa_diag_meminfo(void);
+void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa);
+void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+                    bfa_cb_diag_beacon_t cbfn_beacon,
+                    struct bfa_trc_mod_s *trcmod);
+bfa_status_t   bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset,
+                       u32 len, u32 *buf, u32 force);
+bfa_status_t   bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset,
+                       u32 len, u32 value, u32 force);
+bfa_status_t   bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+                       struct bfa_diag_results_tempsensor_s *result,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt,
+                       u32 pattern, struct bfa_diag_results_fwping *result,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_diag_sfpshow(struct bfa_diag_s *diag,
+                       struct sfp_mem_s *sfpmem, u8 static_data,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_diag_memtest(struct bfa_diag_s *diag,
+                       struct bfa_diag_memtest_s *memtest, u32 pattern,
+                       struct bfa_diag_memtest_result *result,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_diag_ledtest(struct bfa_diag_s *diag,
+                       struct bfa_diag_ledtest_s *ledtest);
+bfa_status_t   bfa_diag_beacon_port(struct bfa_diag_s *diag,
+                       bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon,
+                       u32 sec);
+
+/*
+ *     PHY module specific
+ */
+typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_phy_s {
+       struct bfa_ioc_s *ioc;          /* back pointer to ioc */
+       struct bfa_trc_mod_s *trcmod;   /* trace module */
+       u8      instance;       /* port instance */
+       u8      op_busy;        /* operation busy flag */
+       u8      rsv[2];
+       u32     residue;        /* residual length */
+       u32     offset;         /* offset */
+       bfa_status_t    status;         /* status */
+       u8      *dbuf_kva;      /* dma buf virtual address */
+       u64     dbuf_pa;        /* dma buf physical address */
+       struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+       bfa_cb_phy_t    cbfn;           /* user callback function */
+       void            *cbarg;         /* user callback arg */
+       u8              *ubuf;          /* user supplied buffer */
+       struct bfa_cb_qe_s      hcb_qe; /* comp: BFA callback qelem */
+       u32     addr_off;       /* phy address offset */
+       struct bfa_mbox_cmd_s   mb;       /* mailbox */
+       struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
+       struct bfa_mem_dma_s    phy_dma;
+};
+#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
+#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
+
+bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc);
+bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+                       struct bfa_phy_attr_s *attr,
+                       bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+                       struct bfa_phy_stats_s *stats,
+                       bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+                       void *buf, u32 len, u32 offset,
+                       bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+                       void *buf, u32 len, u32 offset,
+                       bfa_cb_phy_t cbfn, void *cbarg);
+
+u32    bfa_phy_meminfo(bfa_boolean_t mincfg);
+void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc,
+               void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_phy_memclaim(struct bfa_phy_s *phy,
+               u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
+
+/*
+ * Driver Config (dconf) specific
+ */
+#define BFI_DCONF_SIGNATURE    0xabcdabcd
+#define BFI_DCONF_VERSION      1
+
+#pragma pack(1)
+struct bfa_dconf_hdr_s {
+       u32     signature;
+       u32     version;
+};
+
+struct bfa_dconf_s {
+       struct bfa_dconf_hdr_s          hdr;
+       struct bfa_lunmask_cfg_s        lun_mask;
+};
+#pragma pack()
+
+struct bfa_dconf_mod_s {
+       bfa_sm_t                sm;
+       u8                      instance;
+       bfa_boolean_t           flashdone;
+       bfa_boolean_t           read_data_valid;
+       bfa_boolean_t           min_cfg;
+       struct bfa_timer_s      timer;
+       struct bfa_s            *bfa;
+       void                    *bfad;
+       void                    *trcmod;
+       struct bfa_dconf_s      *dconf;
+       struct bfa_mem_kva_s    kva_seg;
+};
+
+#define BFA_DCONF_MOD(__bfa)   \
+       (&(__bfa)->modules.dconf_mod)
+#define BFA_MEM_DCONF_KVA(__bfa)       (&(BFA_DCONF_MOD(__bfa)->kva_seg))
+#define bfa_dconf_read_data_valid(__bfa)       \
+       (BFA_DCONF_MOD(__bfa)->read_data_valid)
+#define BFA_DCONF_UPDATE_TOV   5000    /* dconf update timeout in msec */
+
+void   bfa_dconf_modinit(struct bfa_s *bfa);
+void   bfa_dconf_modexit(struct bfa_s *bfa);
+bfa_status_t   bfa_dconf_update(struct bfa_s *bfa);
+
+/*
+ *     IOC specific macros
+ */
 #define bfa_ioc_pcifn(__ioc)           ((__ioc)->pcidev.pci_func)
 #define bfa_ioc_devid(__ioc)           ((__ioc)->pcidev.device_id)
 #define bfa_ioc_bar0(__ioc)            ((__ioc)->pcidev.pci_bar_kva)
 #define bfa_ioc_portid(__ioc)          ((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc)                ((__ioc)->asic_gen)
+#define bfa_ioc_is_cna(__ioc)  \
+       ((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) ||      \
+        (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL))
 #define bfa_ioc_fetch_stats(__ioc, __stats) \
                (((__stats)->drv_stats) = (__ioc)->stats)
 #define bfa_ioc_clr_stats(__ioc)       \
@@ -287,12 +752,9 @@ struct bfa_ioc_hwif_s {
 
 #define bfa_ioc_stats(_ioc, _stats)    ((_ioc)->stats._stats++)
 #define BFA_IOC_FWIMG_MINSZ    (16 * 1024)
-#define BFA_IOC_FWIMG_TYPE(__ioc)                                      \
-       (((__ioc)->ctdev) ?                                             \
-        (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) :     \
-        BFI_IMAGE_CB_FC)
-#define BFA_IOC_FW_SMEM_SIZE(__ioc)                                    \
-       (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc)                    \
+       ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB)   \
+        ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
 #define BFA_IOC_FLASH_CHUNK_NO(off)            (off / BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)     (off % BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
@@ -305,7 +767,7 @@ void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
                bfa_ioc_mbox_mcfunc_t *mcfuncs);
 void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
 void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
-void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
+bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
 void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
                bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
 
@@ -315,40 +777,49 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
 
 #define bfa_ioc_pll_init_asic(__ioc) \
        ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
-                          (__ioc)->fcmode))
+                          (__ioc)->asic_mode))
 
 bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
-bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
-bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb);
-bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
+bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
 
-#define        bfa_ioc_isr_mode_set(__ioc, __msix)                     \
-                       ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_isr_mode_set(__ioc, __msix) do {                       \
+       if ((__ioc)->ioc_hwif->ioc_isr_mode_set)                        \
+               ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix));   \
+} while (0)
 #define        bfa_ioc_ownership_reset(__ioc)                          \
                        ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+#define bfa_ioc_get_fcmode(__ioc)      ((__ioc)->fcmode)
+#define bfa_ioc_lpu_read_stat(__ioc) do {                      \
+       if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)               \
+               ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc));  \
+} while (0)
 
-
-void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
 void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc);
 
 void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
                struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
 void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
 void bfa_ioc_detach(struct bfa_ioc_s *ioc);
 void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
-               enum bfi_mclass mc);
+               enum bfi_pcifn_class clscode);
 void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa);
 void bfa_ioc_enable(struct bfa_ioc_s *ioc);
 void bfa_ioc_disable(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
 
 void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
-               u32 boot_param);
+               u32 boot_env);
 void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
 void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
 void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
@@ -372,16 +843,42 @@ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
                                 int *trclen);
 bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
        u32 *offset, int *buflen);
-void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
-bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
 void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
                        struct bfi_ioc_image_hdr_s *fwhdr);
 bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
                        struct bfi_ioc_image_hdr_s *fwhdr);
+void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
 bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
 bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
 
+/*
+ * asic block configuration related APIs
+ */
+u32    bfa_ablk_meminfo(void);
+void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa);
+void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc);
+bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk,
+               struct bfa_ablk_cfg_s *ablk_cfg,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk,
+               enum bfa_mode_s mode, int max_pf, int max_vf,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
+               enum bfa_mode_s mode, int max_pf, int max_vf,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
+               u8 port, enum bfi_pcifn_class personality, int bw,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
+               bfa_ablk_cbfn_t cbfn, void *cbarg);
+
 /*
  * bfa mfg wwn API functions
  */
@@ -391,50 +888,64 @@ mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
 /*
  * F/W Image Size & Chunk
  */
-extern u32 bfi_image_ct_fc_size;
-extern u32 bfi_image_ct_cna_size;
-extern u32 bfi_image_cb_fc_size;
-extern u32 *bfi_image_ct_fc;
-extern u32 *bfi_image_ct_cna;
-extern u32 *bfi_image_cb_fc;
+extern u32 bfi_image_cb_size;
+extern u32 bfi_image_ct_size;
+extern u32 bfi_image_ct2_size;
+extern u32 *bfi_image_cb;
+extern u32 *bfi_image_ct;
+extern u32 *bfi_image_ct2;
 
 static inline u32 *
-bfi_image_ct_fc_get_chunk(u32 off)
-{      return (u32 *)(bfi_image_ct_fc + off); }
+bfi_image_cb_get_chunk(u32 off)
+{
+       return (u32 *)(bfi_image_cb + off);
+}
 
 static inline u32 *
-bfi_image_ct_cna_get_chunk(u32 off)
-{      return (u32 *)(bfi_image_ct_cna + off); }
+bfi_image_ct_get_chunk(u32 off)
+{
+       return (u32 *)(bfi_image_ct + off);
+}
 
 static inline u32 *
-bfi_image_cb_fc_get_chunk(u32 off)
-{      return (u32 *)(bfi_image_cb_fc + off); }
+bfi_image_ct2_get_chunk(u32 off)
+{
+       return (u32 *)(bfi_image_ct2 + off);
+}
 
 static inline u32*
-bfa_cb_image_get_chunk(int type, u32 off)
+bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
 {
-       switch (type) {
-       case BFI_IMAGE_CT_FC:
-               return bfi_image_ct_fc_get_chunk(off);  break;
-       case BFI_IMAGE_CT_CNA:
-               return bfi_image_ct_cna_get_chunk(off); break;
-       case BFI_IMAGE_CB_FC:
-               return bfi_image_cb_fc_get_chunk(off);  break;
-       default: return NULL;
+       switch (asic_gen) {
+       case BFI_ASIC_GEN_CB:
+               return bfi_image_cb_get_chunk(off);
+               break;
+       case BFI_ASIC_GEN_CT:
+               return bfi_image_ct_get_chunk(off);
+               break;
+       case BFI_ASIC_GEN_CT2:
+               return bfi_image_ct2_get_chunk(off);
+               break;
+       default:
+               return NULL;
        }
 }
 
 static inline u32
-bfa_cb_image_get_size(int type)
+bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
 {
-       switch (type) {
-       case BFI_IMAGE_CT_FC:
-               return bfi_image_ct_fc_size;    break;
-       case BFI_IMAGE_CT_CNA:
-               return bfi_image_ct_cna_size;   break;
-       case BFI_IMAGE_CB_FC:
-               return bfi_image_cb_fc_size;    break;
-       default: return 0;
+       switch (asic_gen) {
+       case BFI_ASIC_GEN_CB:
+               return bfi_image_cb_size;
+               break;
+       case BFI_ASIC_GEN_CT:
+               return bfi_image_ct_size;
+               break;
+       case BFI_ASIC_GEN_CT2:
+               return bfi_image_ct2_size;
+               break;
+       default:
+               return 0;
        }
 }
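+
+/*
+ * Access sketch (illustrative; the download path that consumes these
+ * helpers is not shown here). The image is addressed in 32-bit words and
+ * fetched a chunk at a time for the ASIC generation actually present:
+ *
+ *     u32 sz = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+ *     u32 *chunk = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ *                             BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ */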
 
index 89ae4c8f95a2bc9506f7cf902b28a331f69b0bfa..30df8a284715036945085f881db5461bffbc4957 100644 (file)
@@ -17,7 +17,7 @@
 
 #include "bfad_drv.h"
 #include "bfa_ioc.h"
-#include "bfi_cbreg.h"
+#include "bfi_reg.h"
 #include "bfa_defs.h"
 
 BFA_TRC_FILE(CNA, IOC_CB);
@@ -69,21 +69,6 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 static bfa_boolean_t
 bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
 {
-       struct bfi_ioc_image_hdr_s fwhdr;
-       uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate);
-
-       if (fwstate == BFI_IOC_UNINIT)
-               return BFA_TRUE;
-
-       bfa_ioc_fwver_get(ioc, &fwhdr);
-
-       if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL)
-               return BFA_TRUE;
-
-       bfa_trc(ioc, fwstate);
-       bfa_trc(ioc, fwhdr.exec);
-       writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
-
        return BFA_TRUE;
 }
 
@@ -98,7 +83,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
 static void
 bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
 {
-       writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+       writel(~0U, ioc->ioc_regs.err_set);
        readl(ioc->ioc_regs.err_set);
 }
 
@@ -152,8 +137,8 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
-       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG);
-       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG);
+       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
 
        /*
         * IOC semaphore registers and serialization
@@ -285,18 +270,18 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
 }
 
 bfa_status_t
-bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
+bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
 {
        u32     pll_sclk, pll_fclk;
 
-       pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
-               __APP_PLL_212_P0_1(3U) |
-               __APP_PLL_212_JITLMT0_1(3U) |
-               __APP_PLL_212_CNTLMT0_1(3U);
-       pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
-               __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
-               __APP_PLL_400_JITLMT0_1(3U) |
-               __APP_PLL_400_CNTLMT0_1(3U);
+       pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
+               __APP_PLL_SCLK_P0_1(3U) |
+               __APP_PLL_SCLK_JITLMT0_1(3U) |
+               __APP_PLL_SCLK_CNTLMT0_1(3U);
+       pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN |
+               __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+               __APP_PLL_LCLK_JITLMT0_1(3U) |
+               __APP_PLL_LCLK_CNTLMT0_1(3U);
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
@@ -305,24 +290,24 @@ bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
-       writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
-       writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_212_CTL_REG);
-       writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
-       writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_400_CTL_REG);
+       writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+       writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_SCLK_CTL_REG);
+       writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+       writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_LCLK_CTL_REG);
        udelay(2);
-       writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
-       writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
-       writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_212_CTL_REG);
-       writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_400_CTL_REG);
+       writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+       writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+       writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_SCLK_CTL_REG);
+       writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_LCLK_CTL_REG);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
-       writel(pll_sclk, (rb + APP_PLL_212_CTL_REG));
-       writel(pll_fclk, (rb + APP_PLL_400_CTL_REG));
+       writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
+       writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));
 
        return BFA_STATUS_OK;
 }
index 93612520f0d2bf3ca1810f3c9a48d93016484098..d1b8f0caaa79ed3f301d6c8b509339fee76d0169 100644 (file)
@@ -17,7 +17,7 @@
 
 #include "bfad_drv.h"
 #include "bfa_ioc.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
 #include "bfa_defs.h"
 
 BFA_TRC_FILE(CNA, IOC_CT);
@@ -36,9 +36,6 @@ BFA_TRC_FILE(CNA, IOC_CT);
  */
 static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
-static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
-static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
-static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
 static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
@@ -48,29 +45,7 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
 
 static struct bfa_ioc_hwif_s hwif_ct;
-
-/*
- * Called from bfa_ioc_attach() to map asic specific calls.
- */
-void
-bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
-{
-       hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
-       hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
-       hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
-       hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
-       hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
-       hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
-       hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
-       hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
-       hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
-       hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
-       hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
-       hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
-       hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
-
-       ioc->ioc_hwif = &hwif_ct;
-}
+static struct bfa_ioc_hwif_s hwif_ct2;
 
 /*
  * Return true if firmware of current driver matches the running firmware.
@@ -82,16 +57,10 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
        u32 usecnt;
        struct bfi_ioc_image_hdr_s fwhdr;
 
-       /*
-        * Firmware match check is relevant only for CNA.
-        */
-       if (!ioc->cna)
-               return BFA_TRUE;
-
        /*
         * If bios boot (flash based) -- do not increment usage count
         */
-       if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+       if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return BFA_TRUE;
 
@@ -103,6 +72,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
+               readl(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                bfa_trc(ioc, usecnt);
@@ -122,6 +92,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
         */
        bfa_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+               readl(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
                bfa_trc(ioc, usecnt);
                return BFA_FALSE;
@@ -132,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+       readl(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
        bfa_trc(ioc, usecnt);
        return BFA_TRUE;
@@ -142,16 +114,10 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
 {
        u32 usecnt;
 
-       /*
-        * Firmware lock is relevant only for CNA.
-        */
-       if (!ioc->cna)
-               return;
-
        /*
         * If bios boot (flash based) -- do not decrement usage count
         */
-       if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+       if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;
 
@@ -166,6 +132,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_trc(ioc, usecnt);
 
+       readl(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 }
 
@@ -175,14 +142,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
 static void
 bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
 {
-       if (ioc->cna) {
+       if (bfa_ioc_is_cna(ioc)) {
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
                /* Wait for halt to take effect */
                readl(ioc->ioc_regs.ll_halt);
                readl(ioc->ioc_regs.alt_ll_halt);
        } else {
-               writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+               writel(~0U, ioc->ioc_regs.err_set);
                readl(ioc->ioc_regs.err_set);
        }
 }
@@ -190,7 +157,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
 /*
  * Host to LPU mailbox message addresses
  */
-static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
@@ -200,21 +167,31 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
 /*
  * Host <-> LPU mailbox command/status registers - port 0
  */
-static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
-       { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
-       { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
-       { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
-       { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
+static struct { u32 hfn, lpu; } ct_p0reg[] = {
+       { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+       { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
+       { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
+       { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
 };
 
 /*
  * Host <-> LPU mailbox command/status registers - port 1
  */
-static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
-       { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
-       { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
-       { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
-       { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
+static struct { u32 hfn, lpu; } ct_p1reg[] = {
+       { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
+       { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
+       { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
+       { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
+};
+
+static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
+       ct2_reg[] = {
+       { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+         CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
+         CT2_HOSTFN_LPU0_READ_STAT},
+       { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+         CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
+         CT2_HOSTFN_LPU1_READ_STAT},
 };
 
 static void
@@ -225,24 +202,24 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 
        rb = bfa_ioc_bar0(ioc);
 
-       ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
-       ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
-       ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+       ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+       ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+       ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
 
        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
-               ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
-               ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+               ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
+               ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
                ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
-               ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
-               ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+               ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
+               ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }
@@ -252,8 +229,8 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
-       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
-       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
+       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
 
        /*
         * IOC semaphore registers and serialization
@@ -276,6 +253,64 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
 }
 
+static void
+bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
+{
+       void __iomem *rb;
+       int     port = bfa_ioc_portid(ioc);
+
+       rb = bfa_ioc_bar0(ioc);
+
+       ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
+       ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
+       ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
+       ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
+       ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
+       ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
+
+       if (port == 0) {
+               ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
+               ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+               ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
+               ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+               ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+       } else {
+               ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
+               ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
+               ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+               ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+               ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+       }
+
+       /*
+        * PSS control registers
+        */
+       ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+       ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+       ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
+       ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
+
+       /*
+        * IOC semaphore registers and serialization
+        */
+       ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
+       ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
+       ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
+       ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
+       ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
+
+       /*
+        * sram memory access
+        */
+       ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+       ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+       /*
+        * err set reg : for notification of hb failure in fcmode
+        */
+       ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
 /*
  * Initialize IOC to port mapping.
  */
@@ -298,6 +333,19 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
        bfa_trc(ioc, ioc->port_id);
 }
 
+static void
+bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
+{
+       void __iomem    *rb = ioc->pcidev.pci_bar_kva;
+       u32     r32;
+
+       r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
+       ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
+
+       bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+       bfa_trc(ioc, ioc->port_id);
+}
+
 /*
  * Set interrupt mode for a function: INTX or MSIX
  */
@@ -316,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
        /*
         * If already in desired mode, do not change anything
         */
-       if (!msix && mode)
+       if ((!msix && mode) || (msix && !mode))
                return;
 
        if (msix)
@@ -331,6 +379,20 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
        writel(r32, rb + FNC_PERS_REG);
 }
 
+bfa_boolean_t
+bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
+{
+       u32     r32;
+
+       r32 = readl(ioc->ioc_regs.lpu_read_stat);
+       if (r32) {
+               writel(1, ioc->ioc_regs.lpu_read_stat);
+               return BFA_TRUE;
+       }
+
+       return BFA_FALSE;
+}
+
 /*
  * Cleanup hw semaphore and usecnt registers
  */
@@ -338,9 +400,10 @@ static void
 bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 {
 
-       if (ioc->cna) {
+       if (bfa_ioc_is_cna(ioc)) {
                bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_usage_reg);
+               readl(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
        }
 
@@ -449,32 +512,99 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
        return BFA_FALSE;
 }
 
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+static void
+bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
+{
+       hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+       hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+       hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
+       hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+       hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
+       hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
+       hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
+       hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
+       hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
+}
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
+{
+       bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
+
+       hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+       hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+       hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+       hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+       ioc->ioc_hwif = &hwif_ct;
+}
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
+{
+       bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
+
+       hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
+       hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
+       hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
+       hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
+       hwif_ct2.ioc_isr_mode_set = NULL;
+       ioc->ioc_hwif = &hwif_ct2;
+}
+
 /*
- * Check the firmware state to know if pll_init has been completed already
+ * Workaround for MSI-X resource allocation for catapult-2 with no asic block
  */
-bfa_boolean_t
-bfa_ioc_ct_pll_init_complete(void __iomem *rb)
+#define HOSTFN_MSIX_DEFAULT            64
+#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR  0x30138
+#define HOSTFN_MSIX_VT_OFST_NUMVT      0x3013c
+#define __MSIX_VT_NUMVT__MK            0x003ff800
+#define __MSIX_VT_NUMVT__SH            11
+#define __MSIX_VT_NUMVT_(_v)           ((_v) << __MSIX_VT_NUMVT__SH)
+#define __MSIX_VT_OFST_                        0x000007ff
+void
+bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
 {
-       if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
-         (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
-               return BFA_TRUE;
+       void __iomem *rb = ioc->pcidev.pci_bar_kva;
+       u32     r32;
 
-       return BFA_FALSE;
+       r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+       if (r32 & __MSIX_VT_NUMVT__MK) {
+               writel(r32 & __MSIX_VT_OFST_,
+                       rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+               return;
+       }
+
+       writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
+               HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+               rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+       writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+               rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
 }
 
 bfa_status_t
-bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 {
        u32     pll_sclk, pll_fclk, r32;
+       bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);
+
+       pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
+               __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
+               __APP_PLL_SCLK_JITLMT0_1(3U) |
+               __APP_PLL_SCLK_CNTLMT0_1(1U);
+       pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
+               __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+               __APP_PLL_LCLK_JITLMT0_1(3U) |
+               __APP_PLL_LCLK_CNTLMT0_1(1U);
 
-       pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
-               __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
-               __APP_PLL_312_JITLMT0_1(3U) |
-               __APP_PLL_312_CNTLMT0_1(1U);
-       pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
-               __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
-               __APP_PLL_425_JITLMT0_1(3U) |
-               __APP_PLL_425_CNTLMT0_1(1U);
        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
@@ -491,20 +621,21 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
-       writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_312_CTL_REG);
-       writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
-                       rb + APP_PLL_425_CTL_REG);
-       writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
-                       rb + APP_PLL_312_CTL_REG);
-       writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
-                       rb + APP_PLL_425_CTL_REG);
+       writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_SCLK_CTL_REG);
+       writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_LCLK_CTL_REG);
+       writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
+               __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+       writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
+               __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
-       writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
-       writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
+       writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+       writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
+
        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
@@ -524,3 +655,206 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
 }
+
+static void
+bfa_ioc_ct2_sclk_init(void __iomem *rb)
+{
+       u32 r32;
+
+       /*
+        * put s_clk PLL and PLL FSM in reset
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
+       r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
+               __APP_PLL_SCLK_LOGIC_SOFT_RESET);
+       writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * Ignore mode and program for the max clock (which is FC16)
+        * Firmware/NFC will do the PLL init appropriately
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
+       writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * while doing PLL init, don't clock gate the ethernet subsystem
+        */
+       r32 = readl((rb + CT2_CHIP_MISC_PRG));
+       writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
+
+       r32 = readl((rb + CT2_PCIE_MISC_REG));
+       writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
+
+       /*
+        * set sclk value
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
+               __APP_PLL_SCLK_CLK_DIV2);
+       writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * poll for s_clk lock or delay 1ms
+        */
+       udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_lclk_init(void __iomem *rb)
+{
+       u32 r32;
+
+       /*
+        * put l_clk PLL and PLL FSM in reset
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
+       r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
+               __APP_PLL_LCLK_LOGIC_SOFT_RESET);
+       writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /*
+        * set LPU speed (set for FC16 which will work for other modes)
+        */
+       r32 = readl((rb + CT2_CHIP_MISC_PRG));
+       writel(r32, (rb + CT2_CHIP_MISC_PRG));
+
+       /*
+        * set LPU half speed (set for FC16 which will work for other modes)
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /*
+        * set lclk for mode (set for FC16)
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
+       r32 |= 0x20c1731b;
+       writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /*
+        * poll for l_clk lock or delay 1ms
+        */
+       udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_mem_init(void __iomem *rb)
+{
+       u32     r32;
+
+       r32 = readl((rb + PSS_CTL_REG));
+       r32 &= ~__PSS_LMEM_RESET;
+       writel(r32, (rb + PSS_CTL_REG));
+       udelay(1000);
+
+       writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+       udelay(1000);
+       writel(0, (rb + CT2_MBIST_CTL_REG));
+}
+
+void
+bfa_ioc_ct2_mac_reset(void __iomem *rb)
+{
+       u32     r32;
+
+       bfa_ioc_ct2_sclk_init(rb);
+       bfa_ioc_ct2_lclk_init(rb);
+
+       /*
+        * release soft reset on s_clk
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+               (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * release soft reset on l_clk
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+               (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /* put port0, port1 MAC & AHB in reset */
+       writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+               rb + CT2_CSI_MAC_CONTROL_REG(0));
+       writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+               rb + CT2_CSI_MAC_CONTROL_REG(1));
+}
+
+#define CT2_NFC_MAX_DELAY      1000
+bfa_status_t
+bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
+{
+       u32     wgn, r32;
+       int i;
+
+       /*
+        * Initialize PLL if not already done by NFC
+        */
+       wgn = readl(rb + CT2_WGN_STATUS);
+       if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+               writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
+               for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+                       r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+                       if (r32 & __NFC_CONTROLLER_HALTED)
+                               break;
+                       udelay(1000);
+               }
+       }
+
+       /*
+        * Mask the interrupts and clear any
+        * pending interrupts.
+        */
+       writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+       writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+       r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+       if (r32 == 1) {
+               writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+               readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+       }
+       r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+       if (r32 == 1) {
+               writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+               readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+       }
+
+       bfa_ioc_ct2_mac_reset(rb);
+       bfa_ioc_ct2_sclk_init(rb);
+       bfa_ioc_ct2_lclk_init(rb);
+
+       /*
+        * release soft reset on s_clk
+        */
+       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+       writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+               (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+       /*
+        * release soft reset on l_clk
+        */
+       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+       writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+               (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+       /*
+        * Announce flash device presence, if flash was corrupted.
+        */
+       if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+               r32 = readl((rb + PSS_GPIO_OUT_REG));
+               writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
+               r32 = readl((rb + PSS_GPIO_OE_REG));
+               writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
+       }
+
+       bfa_ioc_ct2_mem_init(rb);
+
+       writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
+       writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+       return BFA_STATUS_OK;
+}
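Note: bfa_ioc_set_ct2_hwif() above deliberately leaves ioc_isr_mode_set NULL, so any common code that dispatches through the per-ASIC hwif table must guard that entry. The sketch below is not part of the commit and only illustrates the expected call pattern; the actual guard is presumed to live in bfa_ioc.c, outside this hunk.

/*
 * Illustrative sketch, not from the patch: dispatch through the per-ASIC
 * hwif table while skipping operations a given ASIC generation omits.
 */
static void
ioc_isr_mode_set_sketch(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
        if (ioc->ioc_hwif->ioc_isr_mode_set)
                ioc->ioc_hwif->ioc_isr_mode_set(ioc, msix);
}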
index ab79ff6fdeea6b8c7bc1c6f3b2b88af142e1f5fb..2d36e4823835d326bfc32c2c29f9a2d8e93d55c9 100644 (file)
 #include "bfa_port.h"
 
 struct bfa_modules_s {
+       struct bfa_fcdiag_s     fcdiag;         /* fcdiag module */
        struct bfa_fcport_s     fcport;         /*  fc port module            */
        struct bfa_fcxp_mod_s   fcxp_mod;       /*  fcxp module       */
        struct bfa_lps_mod_s    lps_mod;        /*  fcxp module       */
        struct bfa_uf_mod_s     uf_mod;         /*  unsolicited frame module */
        struct bfa_rport_mod_s  rport_mod;      /*  remote port module        */
-       struct bfa_fcpim_mod_s  fcpim_mod;      /*  FCP initiator module     */
+       struct bfa_fcp_mod_s    fcp_mod;        /*  FCP initiator module     */
        struct bfa_sgpg_mod_s   sgpg_mod;       /*  SG page module            */
        struct bfa_port_s       port;           /*  Physical port module     */
+       struct bfa_ablk_s       ablk;           /*  ASIC block config module */
+       struct bfa_cee_s        cee;            /*  CEE Module  */
+       struct bfa_sfp_s        sfp;            /*  SFP module  */
+       struct bfa_flash_s      flash;          /*  flash module */
+       struct bfa_diag_s       diag_mod;       /*  diagnostics module  */
+       struct bfa_phy_s        phy;            /*  phy module          */
+       struct bfa_dconf_mod_s  dconf_mod;      /*  DCONF common module */
 };
 
 /*
@@ -51,17 +59,16 @@ enum {
        BFA_TRC_HAL_IOCFC_CB    = 5,
 };
 
-
 /*
  * Macro to define a new BFA module
  */
 #define BFA_MODULE(__mod)                                              \
        static void bfa_ ## __mod ## _meminfo(                          \
-                       struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,      \
-                       u32 *dm_len);      \
+                       struct bfa_iocfc_cfg_s *cfg,                    \
+                       struct bfa_meminfo_s *meminfo,                  \
+                       struct bfa_s *bfa);                             \
        static void bfa_ ## __mod ## _attach(struct bfa_s *bfa,         \
                        void *bfad, struct bfa_iocfc_cfg_s *cfg,        \
-                       struct bfa_meminfo_s *meminfo,                  \
                        struct bfa_pcidev_s *pcidev);      \
        static void bfa_ ## __mod ## _detach(struct bfa_s *bfa);      \
        static void bfa_ ## __mod ## _start(struct bfa_s *bfa);      \
@@ -87,11 +94,11 @@ enum {
  * can leave entry points as NULL)
  */
 struct bfa_module_s {
-       void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-                       u32 *dm_len);
+       void (*meminfo) (struct bfa_iocfc_cfg_s *cfg,
+                        struct bfa_meminfo_s *meminfo,
+                        struct bfa_s *bfa);
        void (*attach) (struct bfa_s *bfa, void *bfad,
                        struct bfa_iocfc_cfg_s *cfg,
-                       struct bfa_meminfo_s *meminfo,
                        struct bfa_pcidev_s *pcidev);
        void (*detach) (struct bfa_s *bfa);
        void (*start) (struct bfa_s *bfa);
@@ -109,19 +116,22 @@ struct bfa_s {
        struct bfa_timer_mod_s  timer_mod;      /*  timer module            */
        struct bfa_modules_s    modules;        /*  BFA modules     */
        struct list_head        comp_q;         /*  pending completions     */
-       bfa_boolean_t           rme_process;    /*  RME processing enabled  */
+       bfa_boolean_t           queue_process;  /*  queue processing enabled */
        struct list_head        reqq_waitq[BFI_IOC_MAX_CQS];
        bfa_boolean_t           fcs;            /*  FCS is attached to BFA */
        struct bfa_msix_s       msix;
+       int                     bfa_aen_seq;
 };
 
 extern bfa_boolean_t bfa_auto_recover;
+extern struct bfa_module_s hal_mod_fcdiag;
 extern struct bfa_module_s hal_mod_sgpg;
 extern struct bfa_module_s hal_mod_fcport;
 extern struct bfa_module_s hal_mod_fcxp;
 extern struct bfa_module_s hal_mod_lps;
 extern struct bfa_module_s hal_mod_uf;
 extern struct bfa_module_s hal_mod_rport;
-extern struct bfa_module_s hal_mod_fcpim;
+extern struct bfa_module_s hal_mod_fcp;
+extern struct bfa_module_s hal_mod_dconf;
 
 #endif /* __BFA_MODULES_H__ */
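Note: with the meminfo()/attach() signature change above, modules report their memory needs through struct bfa_meminfo_s instead of raw KVA/DMA length counters, and attach() no longer receives the meminfo pointer. The sketch below is not part of the commit; the "example" module and empty bodies are hypothetical, and real modules declare these entry points through the BFA_MODULE() macro.

/*
 * Illustrative sketch, not from the patch: entry points shaped to the new
 * struct bfa_module_s callback signatures.  "example" is a hypothetical
 * module name.
 */
static void
bfa_example_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
                    struct bfa_s *bfa)
{
        /* register KVA/DMA requirements against 'meminfo' here */
}

static void
bfa_example_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                   struct bfa_pcidev_s *pcidev)
{
        /* claim the memory reserved during the meminfo pass, init state */
}

static struct bfa_module_s hal_mod_example = {
        .meminfo = bfa_example_meminfo,
        .attach  = bfa_example_attach,
        /* remaining entry points (detach, start, ...) left NULL here */
};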
index 3f8e9d6066ecd04417ed01987c9117c9b0eb329f..95e4ad8759acfbcfc5d7ff27d29b40bac2e2f7b9 100644 (file)
@@ -24,8 +24,6 @@
 
 BFA_TRC_FILE(CNA, PORT);
 
-#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
-
 static void
 bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
 {
@@ -236,6 +234,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
 {
        struct bfi_port_generic_req_s *m;
 
+       /* If port is PBC disabled, return error */
+       if (port->pbc_disabled) {
+               bfa_trc(port, BFA_STATUS_PBC);
+               return BFA_STATUS_PBC;
+       }
+
        if (bfa_ioc_is_disabled(port->ioc)) {
                bfa_trc(port, BFA_STATUS_IOC_DISABLED);
                return BFA_STATUS_IOC_DISABLED;
@@ -280,6 +284,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
 {
        struct bfi_port_generic_req_s *m;
 
+       /* If port is PBC disabled, return error */
+       if (port->pbc_disabled) {
+               bfa_trc(port, BFA_STATUS_PBC);
+               return BFA_STATUS_PBC;
+       }
+
        if (bfa_ioc_is_disabled(port->ioc)) {
                bfa_trc(port, BFA_STATUS_IOC_DISABLED);
                return BFA_STATUS_IOC_DISABLED;
@@ -387,32 +397,43 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
 }
 
 /*
- * bfa_port_hbfail()
+ * bfa_port_notify()
  *
+ * Port module IOC event handler
  *
  * @param[in] Pointer to the Port module data structure.
+ * @param[in] IOC event type
  *
  * @return void
  */
 void
-bfa_port_hbfail(void *arg)
+bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
 {
        struct bfa_port_s *port = (struct bfa_port_s *) arg;
 
-       /* Fail any pending get_stats/clear_stats requests */
-       if (port->stats_busy) {
-               if (port->stats_cbfn)
-                       port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED);
-               port->stats_cbfn = NULL;
-               port->stats_busy = BFA_FALSE;
-       }
-
-       /* Clear any enable/disable is pending */
-       if (port->endis_pending) {
-               if (port->endis_cbfn)
-                       port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED);
-               port->endis_cbfn = NULL;
-               port->endis_pending = BFA_FALSE;
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               /* Fail any pending get_stats/clear_stats requests */
+               if (port->stats_busy) {
+                       if (port->stats_cbfn)
+                               port->stats_cbfn(port->stats_cbarg,
+                                               BFA_STATUS_FAILED);
+                       port->stats_cbfn = NULL;
+                       port->stats_busy = BFA_FALSE;
+               }
+
+               /* Clear any pending enable/disable request */
+               if (port->endis_pending) {
+                       if (port->endis_cbfn)
+                               port->endis_cbfn(port->endis_cbarg,
+                                               BFA_STATUS_FAILED);
+                       port->endis_cbfn = NULL;
+                       port->endis_pending = BFA_FALSE;
+               }
+               break;
+       default:
+               break;
        }
 }
 
@@ -445,10 +466,12 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
        port->endis_pending = BFA_FALSE;
        port->stats_cbfn = NULL;
        port->endis_cbfn = NULL;
+       port->pbc_disabled = BFA_FALSE;
 
        bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
-       bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
-       list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q);
+       bfa_q_qe_init(&port->ioc_notify);
+       bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port);
+       list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q);
 
        /*
         * initialize time stamp for stats reset
@@ -458,3 +481,368 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 
        bfa_trc(port, 0);
 }
+
+/*
+ *     CEE module specific definitions
+ */
+
+/*
+ * bfa_cee_get_attr_isr()
+ *
+ * @brief CEE ISR for get-attributes responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ *                 status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+       struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote;
+
+       cee->get_attr_status = status;
+       bfa_trc(cee, 0);
+       if (status == BFA_STATUS_OK) {
+               bfa_trc(cee, 0);
+               memcpy(cee->attr, cee->attr_dma.kva,
+                       sizeof(struct bfa_cee_attr_s));
+               lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live);
+               lldp_cfg->enabled_system_cap =
+                               be16_to_cpu(lldp_cfg->enabled_system_cap);
+       }
+       cee->get_attr_pending = BFA_FALSE;
+       if (cee->cbfn.get_attr_cbfn) {
+               bfa_trc(cee, 0);
+               cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+       }
+}
+
+/*
+ * bfa_cee_get_stats_isr()
+ *
+ * @brief CEE ISR for get-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ *           status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+       u32 *buffer;
+       int i;
+
+       cee->get_stats_status = status;
+       bfa_trc(cee, 0);
+       if (status == BFA_STATUS_OK) {
+               bfa_trc(cee, 0);
+               memcpy(cee->stats, cee->stats_dma.kva,
+                       sizeof(struct bfa_cee_stats_s));
+               /* swap the cee stats */
+               buffer = (u32 *)cee->stats;
+               for (i = 0; i < (sizeof(struct bfa_cee_stats_s) /
+                                sizeof(u32)); i++)
+                       buffer[i] = cpu_to_be32(buffer[i]);
+       }
+       cee->get_stats_pending = BFA_FALSE;
+       bfa_trc(cee, 0);
+       if (cee->cbfn.get_stats_cbfn) {
+               bfa_trc(cee, 0);
+               cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+       }
+}
+
+/*
+ * bfa_cee_reset_stats_isr()
+ *
+ * @brief CEE ISR for reset-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ *            status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+       cee->reset_stats_status = status;
+       cee->reset_stats_pending = BFA_FALSE;
+       if (cee->cbfn.reset_stats_cbfn)
+               cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+
+/*
+ * bfa_cee_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE module
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_cee_meminfo(void)
+{
+       return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) +
+               BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * bfa_cee_mem_claim()
+ *
+ * @brief Initializes CEE DMA memory
+ *
+ * @param[in] cee CEE module pointer
+ *            dma_kva Kernel Virtual Address of CEE DMA Memory
+ *            dma_pa  Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
+{
+       cee->attr_dma.kva = dma_kva;
+       cee->attr_dma.pa = dma_pa;
+       cee->stats_dma.kva = dma_kva + BFA_ROUNDUP(
+                            sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+       cee->stats_dma.pa = dma_pa + BFA_ROUNDUP(
+                            sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+       cee->attr = (struct bfa_cee_attr_s *) dma_kva;
+       cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP(
+                       sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ));
+}
+
+/*
+ * bfa_cee_get_attr()
+ *
+ * @brief
+ *   Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
+                bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_cee_get_req_s *cmd;
+
+       WARN_ON((cee == NULL) || (cee->ioc == NULL));
+       bfa_trc(cee, 0);
+       if (!bfa_ioc_is_operational(cee->ioc)) {
+               bfa_trc(cee, 0);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+       if (cee->get_attr_pending == BFA_TRUE) {
+               bfa_trc(cee, 0);
+               return  BFA_STATUS_DEVBUSY;
+       }
+       cee->get_attr_pending = BFA_TRUE;
+       cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg;
+       cee->attr = attr;
+       cee->cbfn.get_attr_cbfn = cbfn;
+       cee->cbfn.get_attr_cbarg = cbarg;
+       bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+               bfa_ioc_portid(cee->ioc));
+       bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+       bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_get_stats()
+ *
+ * @brief
+ *   Send the request to the f/w to fetch CEE statistics.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
+                 bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_cee_get_req_s *cmd;
+
+       WARN_ON((cee == NULL) || (cee->ioc == NULL));
+
+       if (!bfa_ioc_is_operational(cee->ioc)) {
+               bfa_trc(cee, 0);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+       if (cee->get_stats_pending == BFA_TRUE) {
+               bfa_trc(cee, 0);
+               return  BFA_STATUS_DEVBUSY;
+       }
+       cee->get_stats_pending = BFA_TRUE;
+       cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg;
+       cee->stats = stats;
+       cee->cbfn.get_stats_cbfn = cbfn;
+       cee->cbfn.get_stats_cbarg = cbarg;
+       bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
+               bfa_ioc_portid(cee->ioc));
+       bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
+       bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_reset_stats()
+ *
+ * @brief Clears CEE Stats in the f/w.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_reset_stats(struct bfa_cee_s *cee,
+                   bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg)
+{
+       struct bfi_cee_reset_stats_s *cmd;
+
+       WARN_ON((cee == NULL) || (cee->ioc == NULL));
+       if (!bfa_ioc_is_operational(cee->ioc)) {
+               bfa_trc(cee, 0);
+               return BFA_STATUS_IOC_FAILURE;
+       }
+       if (cee->reset_stats_pending == BFA_TRUE) {
+               bfa_trc(cee, 0);
+               return  BFA_STATUS_DEVBUSY;
+       }
+       cee->reset_stats_pending = BFA_TRUE;
+       cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg;
+       cee->cbfn.reset_stats_cbfn = cbfn;
+       cee->cbfn.reset_stats_cbarg = cbarg;
+       bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
+               bfa_ioc_portid(cee->ioc));
+       bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
+
+       return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_isr()
+ *
+ * @brief Handles mailbox interrupts for the CEE module.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
+{
+       union bfi_cee_i2h_msg_u *msg;
+       struct bfi_cee_get_rsp_s *get_rsp;
+       struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg;
+       msg = (union bfi_cee_i2h_msg_u *) m;
+       get_rsp = (struct bfi_cee_get_rsp_s *) m;
+       bfa_trc(cee, msg->mh.msg_id);
+       switch (msg->mh.msg_id) {
+       case BFI_CEE_I2H_GET_CFG_RSP:
+               bfa_trc(cee, get_rsp->cmd_status);
+               bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+               break;
+       case BFI_CEE_I2H_GET_STATS_RSP:
+               bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+               break;
+       case BFI_CEE_I2H_RESET_STATS_RSP:
+               bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
+/*
+ * bfa_cee_notify()
+ *
+ * @brief CEE module IOC event handler.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ * @param[in] IOC event type
+ *
+ * @return void
+ */
+
+void
+bfa_cee_notify(void *arg, enum bfa_ioc_event_e event)
+{
+       struct bfa_cee_s *cee = (struct bfa_cee_s *) arg;
+
+       bfa_trc(cee, event);
+
+       switch (event) {
+       case BFA_IOC_E_DISABLED:
+       case BFA_IOC_E_FAILED:
+               if (cee->get_attr_pending == BFA_TRUE) {
+                       cee->get_attr_status = BFA_STATUS_FAILED;
+                       cee->get_attr_pending  = BFA_FALSE;
+                       if (cee->cbfn.get_attr_cbfn) {
+                               cee->cbfn.get_attr_cbfn(
+                                       cee->cbfn.get_attr_cbarg,
+                                       BFA_STATUS_FAILED);
+                       }
+               }
+               if (cee->get_stats_pending == BFA_TRUE) {
+                       cee->get_stats_status = BFA_STATUS_FAILED;
+                       cee->get_stats_pending  = BFA_FALSE;
+                       if (cee->cbfn.get_stats_cbfn) {
+                               cee->cbfn.get_stats_cbfn(
+                               cee->cbfn.get_stats_cbarg,
+                               BFA_STATUS_FAILED);
+                       }
+               }
+               if (cee->reset_stats_pending == BFA_TRUE) {
+                       cee->reset_stats_status = BFA_STATUS_FAILED;
+                       cee->reset_stats_pending  = BFA_FALSE;
+                       if (cee->cbfn.reset_stats_cbfn) {
+                               cee->cbfn.reset_stats_cbfn(
+                               cee->cbfn.reset_stats_cbarg,
+                               BFA_STATUS_FAILED);
+                       }
+               }
+               break;
+
+       default:
+               break;
+       }
+}
+
+/*
+ * bfa_cee_attach()
+ *
+ * @brief CEE module-attach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ *            ioc - Pointer to the ioc module data structure
+ *            dev - Pointer to the device driver module data structure
+ *                  The device driver specific mbox ISR functions have
+ *                  this pointer as one of the parameters.
+ *
+ * @return void
+ */
+void
+bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc,
+               void *dev)
+{
+       WARN_ON(cee == NULL);
+       cee->dev = dev;
+       cee->ioc = ioc;
+
+       bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+       bfa_q_qe_init(&cee->ioc_notify);
+       bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
+       list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q);
+}
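Note: the new CEE interface is asynchronous: attach the module to the IOC, claim the DMA region sized by bfa_cee_meminfo(), then issue a request that completes through a caller-supplied callback with a bfa_status_t. The sketch below is not part of the commit; it assumes an operational IOC and a pre-allocated DMA buffer, and struct my_drv_s and my_cee_attr_done() are hypothetical consumer plumbing.

/*
 * Illustrative sketch, not from the patch: fetch CEE attributes.
 * 'dma_kva'/'dma_pa' must describe a buffer of at least
 * bfa_cee_meminfo() bytes.
 */
struct my_drv_s {                               /* hypothetical consumer */
        struct bfa_cee_s        cee;
        struct bfa_cee_attr_s   cee_attr;
};

static void
my_cee_attr_done(void *cbarg, bfa_status_t status)
{
        /* on BFA_STATUS_OK, the attr buffer passed below is now valid */
}

static bfa_status_t
my_cee_query_sketch(struct my_drv_s *drv, struct bfa_ioc_s *ioc,
                    u8 *dma_kva, u64 dma_pa)
{
        bfa_cee_attach(&drv->cee, ioc, drv);
        bfa_cee_mem_claim(&drv->cee, dma_kva, dma_pa);

        return bfa_cee_get_attr(&drv->cee, &drv->cee_attr,
                                my_cee_attr_done, drv);
}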
index c4ee9db6b4701f06ea7f825bc1413702aa55f357..947f897328d6df96111a1ea192c3c22cc0a8ebc8 100644 (file)
@@ -43,12 +43,16 @@ struct bfa_port_s {
        bfa_port_endis_cbfn_t           endis_cbfn;
        void                            *endis_cbarg;
        bfa_status_t                    endis_status;
-       struct bfa_ioc_hbfail_notify_s  hbfail;
+       struct bfa_ioc_notify_s         ioc_notify;
+       bfa_boolean_t                   pbc_disabled;
+       struct bfa_mem_dma_s            port_dma;
 };
 
+#define BFA_MEM_PORT_DMA(__bfa)                (&((__bfa)->modules.port.port_dma))
+
 void        bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
                                void *dev, struct bfa_trc_mod_s *trcmod);
-void        bfa_port_hbfail(void *arg);
+void   bfa_port_notify(void *arg, enum bfa_ioc_event_e event);
 
 bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
                                 union bfa_port_stats_u *stats,
@@ -62,4 +66,58 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port,
 u32     bfa_port_meminfo(void);
 void        bfa_port_mem_claim(struct bfa_port_s *port,
                                 u8 *dma_kva, u64 dma_pa);
+
+/*
+ * CEE declaration
+ */
+typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status);
+
+struct bfa_cee_cbfn_s {
+       bfa_cee_get_attr_cbfn_t         get_attr_cbfn;
+       void                            *get_attr_cbarg;
+       bfa_cee_get_stats_cbfn_t        get_stats_cbfn;
+       void                            *get_stats_cbarg;
+       bfa_cee_reset_stats_cbfn_t      reset_stats_cbfn;
+       void                            *reset_stats_cbarg;
+};
+
+struct bfa_cee_s {
+       void *dev;
+       bfa_boolean_t           get_attr_pending;
+       bfa_boolean_t           get_stats_pending;
+       bfa_boolean_t           reset_stats_pending;
+       bfa_status_t            get_attr_status;
+       bfa_status_t            get_stats_status;
+       bfa_status_t            reset_stats_status;
+       struct bfa_cee_cbfn_s   cbfn;
+       struct bfa_ioc_notify_s ioc_notify;
+       struct bfa_trc_mod_s    *trcmod;
+       struct bfa_cee_attr_s   *attr;
+       struct bfa_cee_stats_s  *stats;
+       struct bfa_dma_s        attr_dma;
+       struct bfa_dma_s        stats_dma;
+       struct bfa_ioc_s        *ioc;
+       struct bfa_mbox_cmd_s   get_cfg_mb;
+       struct bfa_mbox_cmd_s   get_stats_mb;
+       struct bfa_mbox_cmd_s   reset_stats_mb;
+       struct bfa_mem_dma_s    cee_dma;
+};
+
+#define BFA_MEM_CEE_DMA(__bfa) (&((__bfa)->modules.cee.cee_dma))
+
+u32    bfa_cee_meminfo(void);
+void   bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa);
+void   bfa_cee_attach(struct bfa_cee_s *cee,
+                       struct bfa_ioc_s *ioc, void *dev);
+bfa_status_t   bfa_cee_get_attr(struct bfa_cee_s *cee,
+                               struct bfa_cee_attr_s *attr,
+                               bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
+bfa_status_t   bfa_cee_get_stats(struct bfa_cee_s *cee,
+                               struct bfa_cee_stats_s *stats,
+                               bfa_cee_get_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t   bfa_cee_reset_stats(struct bfa_cee_s *cee,
+                               bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
+
 #endif /* __BFA_PORT_H__ */
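Note: the meminfo()/mem_claim() pair above defines the CEE DMA contract: the caller supplies one buffer of bfa_cee_meminfo() bytes and the module carves the attribute and statistics areas out of it at BFA_DMA_ALIGN_SZ-rounded offsets. The sketch below is not part of the commit; the driver normally reserves this memory through its own meminfo framework, and dma_alloc_coherent() with a hypothetical pdev is used here only to show the contract.

/*
 * Illustrative sketch, not from the patch: size and claim the CEE DMA
 * region by hand.  'pdev' is a hypothetical PCI device; the usual kernel
 * DMA-mapping headers are assumed to be available via the driver headers.
 */
static int
cee_dma_setup_sketch(struct bfa_cee_s *cee, struct pci_dev *pdev)
{
        u32             sz = bfa_cee_meminfo();
        dma_addr_t      pa;
        u8              *kva;

        kva = dma_alloc_coherent(&pdev->dev, sz, &pa, GFP_KERNEL);
        if (!kva)
                return -ENOMEM;

        bfa_cee_mem_claim(cee, kva, (u64) pa);
        return 0;
}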
index 16d9a5f61c18b1151e31ba9d546f92cd7820ff5b..aa8a0eaf91f9c6b7a2e846dc51048726dd10c5a2 100644 (file)
  */
 
 #include "bfad_drv.h"
+#include "bfad_im.h"
 #include "bfa_plog.h"
 #include "bfa_cs.h"
 #include "bfa_modules.h"
 
 BFA_TRC_FILE(HAL, FCXP);
+BFA_MODULE(fcdiag);
 BFA_MODULE(fcxp);
 BFA_MODULE(sgpg);
 BFA_MODULE(lps);
@@ -113,11 +115,10 @@ static void       bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
 /*
  * forward declarations for LPS functions
  */
-static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-                               u32 *dm_len);
+static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
+               struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
                                struct bfa_iocfc_cfg_s *cfg,
-                               struct bfa_meminfo_s *meminfo,
                                struct bfa_pcidev_s *pcidev);
 static void bfa_lps_detach(struct bfa_s *bfa);
 static void bfa_lps_start(struct bfa_s *bfa);
@@ -125,6 +126,7 @@ static void bfa_lps_stop(struct bfa_s *bfa);
 static void bfa_lps_iocdisable(struct bfa_s *bfa);
 static void bfa_lps_login_rsp(struct bfa_s *bfa,
                                struct bfi_lps_login_rsp_s *rsp);
+static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
                                struct bfi_lps_logout_rsp_s *rsp);
 static void bfa_lps_reqq_resume(void *lps_arg);
@@ -430,51 +432,17 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  */
 
 static void
-claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
-{
-       u8             *dm_kva = NULL;
-       u64     dm_pa;
-       u32     buf_pool_sz;
-
-       dm_kva = bfa_meminfo_dma_virt(mi);
-       dm_pa = bfa_meminfo_dma_phys(mi);
-
-       buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
-
-       /*
-        * Initialize the fcxp req payload list
-        */
-       mod->req_pld_list_kva = dm_kva;
-       mod->req_pld_list_pa = dm_pa;
-       dm_kva += buf_pool_sz;
-       dm_pa += buf_pool_sz;
-       memset(mod->req_pld_list_kva, 0, buf_pool_sz);
-
-       /*
-        * Initialize the fcxp rsp payload list
-        */
-       buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
-       mod->rsp_pld_list_kva = dm_kva;
-       mod->rsp_pld_list_pa = dm_pa;
-       dm_kva += buf_pool_sz;
-       dm_pa += buf_pool_sz;
-       memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
-
-       bfa_meminfo_dma_virt(mi) = dm_kva;
-       bfa_meminfo_dma_phys(mi) = dm_pa;
-}
-
-static void
-claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
+claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
 {
        u16     i;
        struct bfa_fcxp_s *fcxp;
 
-       fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
+       fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
        memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
 
        INIT_LIST_HEAD(&mod->fcxp_free_q);
        INIT_LIST_HEAD(&mod->fcxp_active_q);
+       INIT_LIST_HEAD(&mod->fcxp_unused_q);
 
        mod->fcxp_list = fcxp;
 
@@ -489,40 +457,53 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
                fcxp = fcxp + 1;
        }
 
-       bfa_meminfo_kva(mi) = (void *)fcxp;
+       bfa_mem_kva_curp(mod) = (void *)fcxp;
 }
 
 static void
-bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-                u32 *dm_len)
+bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
-       u16     num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
+       struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
+       struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     nsegs, idx, per_seg_fcxp;
+       u16     num_fcxps = cfg->fwcfg.num_fcxp_reqs;
+       u32     per_fcxp_sz;
 
-       if (num_fcxp_reqs == 0)
+       if (num_fcxps == 0)
                return;
 
-       /*
-        * Account for req/rsp payload
-        */
-       *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
        if (cfg->drvcfg.min_cfg)
-               *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
+               per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
        else
-               *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
+               per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
 
-       /*
-        * Account for fcxp structs
-        */
-       *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
+       /* dma memory */
+       nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
+       per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
+
+       bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
+               if (num_fcxps >= per_seg_fcxp) {
+                       num_fcxps -= per_seg_fcxp;
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               per_seg_fcxp * per_fcxp_sz);
+               } else
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               num_fcxps * per_fcxp_sz);
+       }
+
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, fcxp_kva,
+               cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
 }
 
 static void
 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-               struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-       memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
        mod->bfa = bfa;
        mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
 
@@ -535,8 +516,7 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
        INIT_LIST_HEAD(&mod->wait_q);
 
-       claim_fcxp_req_rsp_mem(mod, meminfo);
-       claim_fcxps_mem(mod, meminfo);
+       claim_fcxps_mem(mod);
 }
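
Illustrative sketch (not part of the patch): the reworked bfa_fcxp_meminfo() above no longer accumulates a single DMA length; it requests memory in fixed-size segments through the bfa_mem_dma_seg_iter()/bfa_mem_dma_setup() helpers. The standalone C sketch below only mirrors that loop shape; MAX_SEG_SZ, NSEGS(), NREQS_SEG() and the example sizes are assumptions standing in for the driver's BFI_MEM_DMA_NSEGS()/BFI_MEM_NREQS_SEG(), whose real values live in the bfa headers.

/*
 * Sketch of segmented DMA sizing; all constants and names are
 * assumptions for this example, not the driver's definitions.
 */
#include <stdio.h>

#define MAX_SEG_SZ	(128 * 1024)	/* assumed per-segment DMA limit */
#define NSEGS(n, sz)	(((n) * (sz) + MAX_SEG_SZ - 1) / MAX_SEG_SZ)
#define NREQS_SEG(sz)	(MAX_SEG_SZ / (sz))	/* elements that fit per segment */

static void dma_seg_layout(unsigned int num_elems, unsigned int per_elem_sz)
{
	unsigned int nsegs = NSEGS(num_elems, per_elem_sz);
	unsigned int per_seg = NREQS_SEG(per_elem_sz);
	unsigned int idx;

	for (idx = 0; idx < nsegs; idx++) {
		unsigned int n = (num_elems >= per_seg) ? per_seg : num_elems;

		printf("segment %u: %u elements, %u bytes\n",
		       idx, n, n * per_elem_sz);
		num_elems -= n;
	}
}

int main(void)
{
	dma_seg_layout(256, 2048);	/* e.g. 256 FCXPs, 2 KB payload each */
	return 0;
}
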
 
 static void
@@ -561,6 +541,9 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
        struct bfa_fcxp_s *fcxp;
        struct list_head              *qe, *qen;
 
+       /* Enqueue unused fcxp resources to free_q */
+       list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
+
        list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
                fcxp = (struct bfa_fcxp_s *) qe;
                if (fcxp->caller == NULL) {
@@ -749,23 +732,6 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
        }
 }
 
-static void
-hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
-{
-       union bfi_addr_u      sga_zero = { {0} };
-
-       sge->sg_len = reqlen;
-       sge->flags = BFI_SGE_DATA_LAST;
-       bfa_dma_addr_set(sge[0].sga, req_pa);
-       bfa_sge_to_be(sge);
-       sge++;
-
-       sge->sga = sga_zero;
-       sge->sg_len = reqlen;
-       sge->flags = BFI_SGE_PGDLEN;
-       bfa_sge_to_be(sge);
-}
-
 static void
 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
                 struct fchs_s *fchs)
@@ -846,7 +812,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
        struct bfa_rport_s              *rport = reqi->bfa_rport;
 
        bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
-                   bfa_lpuid(bfa));
+                   bfa_fn_lpu(bfa));
 
        send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
        if (rport) {
@@ -860,7 +826,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
        }
 
        send_req->vf_id = cpu_to_be16(reqi->vf_id);
-       send_req->lp_tag = reqi->lp_tag;
+       send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
        send_req->class = reqi->class;
        send_req->rsp_timeout = rspi->rsp_timeout;
        send_req->cts = reqi->cts;
@@ -873,18 +839,16 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
         * setup req sgles
         */
        if (fcxp->use_ireqbuf == 1) {
-               hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
+               bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
                                        BFA_FCXP_REQ_PLD_PA(fcxp));
        } else {
                if (fcxp->nreq_sgles > 0) {
                        WARN_ON(fcxp->nreq_sgles != 1);
-                       hal_fcxp_set_local_sges(send_req->req_sge,
-                                               reqi->req_tot_len,
-                                               fcxp->req_sga_cbfn(fcxp->caller,
-                                                                  0));
+                       bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
+                               fcxp->req_sga_cbfn(fcxp->caller, 0));
                } else {
                        WARN_ON(reqi->req_tot_len != 0);
-                       hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
+                       bfa_alen_set(&send_req->rsp_alen, 0, 0);
                }
        }
 
@@ -894,25 +858,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
        if (fcxp->use_irspbuf == 1) {
                WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
 
-               hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
+               bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
                                        BFA_FCXP_RSP_PLD_PA(fcxp));
-
        } else {
                if (fcxp->nrsp_sgles > 0) {
                        WARN_ON(fcxp->nrsp_sgles != 1);
-                       hal_fcxp_set_local_sges(send_req->rsp_sge,
-                                               rspi->rsp_maxlen,
-                                               fcxp->rsp_sga_cbfn(fcxp->caller,
-                                                                  0));
+                       bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
+                               fcxp->rsp_sga_cbfn(fcxp->caller, 0));
+
                } else {
                        WARN_ON(rspi->rsp_maxlen != 0);
-                       hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
+                       bfa_alen_set(&send_req->rsp_alen, 0, 0);
                }
        }
 
        hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
 
-       bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
+       bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
 
        bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
        bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
@@ -978,8 +940,8 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
        void    *reqbuf;
 
        WARN_ON(fcxp->use_ireqbuf != 1);
-       reqbuf = ((u8 *)mod->req_pld_list_kva) +
-               fcxp->fcxp_tag * mod->req_pld_sz;
+       reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+                               mod->req_pld_sz + mod->rsp_pld_sz);
        return reqbuf;
 }
 
@@ -1002,13 +964,15 @@ void *
 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
 {
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
-       void    *rspbuf;
+       void    *fcxp_buf;
 
        WARN_ON(fcxp->use_irspbuf != 1);
 
-       rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
-               fcxp->fcxp_tag * mod->rsp_pld_sz;
-       return rspbuf;
+       fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+                               mod->req_pld_sz + mod->rsp_pld_sz);
+
+       /* req_buf is at the start; rsp_buf follows at offset req_pld_sz */
+       return ((u8 *) fcxp_buf) + mod->req_pld_sz;
 }
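
Illustrative sketch (not part of the patch): bfa_fcxp_get_reqbuf()/bfa_fcxp_get_rspbuf() above now index into one combined per-FCXP DMA buffer instead of two separate payload lists, so the response payload simply starts req_pld_sz bytes after the request payload. A minimal model of that layout, with every name local to the example:

/*
 * Combined request+response payload buffer: request at offset 0,
 * response immediately after it.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fcxp_buf_layout {
	uint8_t *base;		/* one DMA buffer per FCXP */
	size_t	 req_pld_sz;	/* request payload size */
	size_t	 rsp_pld_sz;	/* response payload size */
};

static uint8_t *req_buf(const struct fcxp_buf_layout *l)
{
	return l->base;				/* request payload at offset 0 */
}

static uint8_t *rsp_buf(const struct fcxp_buf_layout *l)
{
	return l->base + l->req_pld_sz;		/* response follows the request */
}

int main(void)
{
	struct fcxp_buf_layout l = { .req_pld_sz = 256, .rsp_pld_sz = 1024 };

	l.base = malloc(l.req_pld_sz + l.rsp_pld_sz);	/* single allocation */
	if (!l.base)
		return 1;
	printf("req at offset %td, rsp at offset %td\n",
	       req_buf(&l) - l.base, rsp_buf(&l) - l.base);
	free(l.base);
	return 0;
}
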
 
 /*
@@ -1181,6 +1145,18 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
        return mod->rsp_pld_sz;
 }
 
+void
+bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
+{
+       struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
+       struct list_head        *qe;
+       int     i;
+
+       for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
+               bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
+               list_add_tail(qe, &mod->fcxp_unused_q);
+       }
+}
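
Illustrative sketch (not part of the patch): bfa_fcxp_res_recfg() parks whatever FCXPs the firmware did not grant on an "unused" queue, and bfa_fcxp_iocdisable() earlier in this hunk splices them back onto the free queue when the IOC goes down. A toy version of that bookkeeping, using plain index stacks instead of the driver's linked lists and assuming every entry starts out free:

/*
 * Free/unused resource split: res_recfg() parks the surplus,
 * iocdisable() returns everything to the free pool.
 */
#include <stdio.h>

#define NUM_RES 8

static int free_pool[NUM_RES], free_cnt;
static int unused_pool[NUM_RES], unused_cnt;

static void res_recfg(int num_fw)	/* firmware granted only num_fw entries */
{
	while (free_cnt > num_fw)
		unused_pool[unused_cnt++] = free_pool[--free_cnt];
}

static void iocdisable(void)		/* IOC down: everything becomes free again */
{
	while (unused_cnt > 0)
		free_pool[free_cnt++] = unused_pool[--unused_cnt];
}

int main(void)
{
	for (free_cnt = 0; free_cnt < NUM_RES; free_cnt++)
		free_pool[free_cnt] = free_cnt;

	res_recfg(5);
	printf("after recfg:      %d free, %d unused\n", free_cnt, unused_cnt);
	iocdisable();
	printf("after iocdisable: %d free, %d unused\n", free_cnt, unused_cnt);
	return 0;
}
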
 
 /*
  *  BFA LPS state machine functions
@@ -1192,7 +1168,7 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
 static void
 bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1244,7 +1220,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1278,6 +1254,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
                break;
 
        case BFA_LPS_SM_OFFLINE:
+       case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;
 
@@ -1297,7 +1274,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1306,6 +1283,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
                break;
 
        case BFA_LPS_SM_OFFLINE:
+       case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;
@@ -1329,7 +1307,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1378,7 +1356,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1420,7 +1398,7 @@ bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1430,6 +1408,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
                break;
 
        case BFA_LPS_SM_OFFLINE:
+       case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;
 
@@ -1444,7 +1423,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
 static void
 bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);
 
        switch (event) {
@@ -1454,6 +1433,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
                break;
 
        case BFA_LPS_SM_OFFLINE:
+       case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;
@@ -1473,13 +1453,17 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
  * return memory requirement
  */
 static void
-bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-       u32 *dm_len)
+bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
+       struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
+
        if (cfg->drvcfg.min_cfg)
-               *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
+               bfa_mem_kva_setup(minfo, lps_kva,
+                       sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
        else
-               *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
+               bfa_mem_kva_setup(minfo, lps_kva,
+                       sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
 }
 
 /*
@@ -1487,28 +1471,28 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
  */
 static void
 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-       struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+       struct bfa_pcidev_s *pcidev)
 {
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
        int                     i;
 
-       memset(mod, 0, sizeof(struct bfa_lps_mod_s));
        mod->num_lps = BFA_LPS_MAX_LPORTS;
        if (cfg->drvcfg.min_cfg)
                mod->num_lps = BFA_LPS_MIN_LPORTS;
        else
                mod->num_lps = BFA_LPS_MAX_LPORTS;
-       mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
+       mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
 
-       bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
+       bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
 
        INIT_LIST_HEAD(&mod->lps_free_q);
        INIT_LIST_HEAD(&mod->lps_active_q);
+       INIT_LIST_HEAD(&mod->lps_login_q);
 
        for (i = 0; i < mod->num_lps; i++, lps++) {
                lps->bfa        = bfa;
-               lps->lp_tag     = (u8) i;
+               lps->bfa_tag    = (u8) i;
                lps->reqq       = BFA_REQQ_LPS;
                bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
                list_add_tail(&lps->qe, &mod->lps_free_q);
@@ -1544,6 +1528,11 @@ bfa_lps_iocdisable(struct bfa_s *bfa)
                lps = (struct bfa_lps_s *) qe;
                bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
        }
+       list_for_each_safe(qe, qen, &mod->lps_login_q) {
+               lps = (struct bfa_lps_s *) qe;
+               bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
+       }
+       list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
 }
 
 /*
@@ -1555,12 +1544,13 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
 
-       WARN_ON(rsp->lp_tag >= mod->num_lps);
-       lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
+       WARN_ON(rsp->bfa_tag >= mod->num_lps);
+       lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
 
        lps->status = rsp->status;
        switch (rsp->status) {
        case BFA_STATUS_OK:
+               lps->fw_tag     = rsp->fw_tag;
                lps->fport      = rsp->f_port;
                if (lps->fport)
                        lps->lp_pid = rsp->lp_pid;
@@ -1572,6 +1562,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
                lps->lp_mac     = rsp->lp_mac;
                lps->brcd_switch = rsp->brcd_switch;
                lps->fcf_mac    = rsp->fcf_mac;
+               lps->pr_bbscn   = rsp->bb_scn;
 
                break;
 
@@ -1586,14 +1577,46 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
 
                break;
 
+       case BFA_STATUS_VPORT_MAX:
+               if (rsp->ext_status)
+                       bfa_lps_no_res(lps, rsp->ext_status);
+               break;
+
        default:
                /* Nothing to do with other status */
                break;
        }
 
+       list_del(&lps->qe);
+       list_add_tail(&lps->qe, &mod->lps_active_q);
        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
 }
 
+static void
+bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
+{
+       struct bfa_s            *bfa = first_lps->bfa;
+       struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
+       struct list_head        *qe, *qe_next;
+       struct bfa_lps_s        *lps;
+
+       bfa_trc(bfa, count);
+
+       qe = bfa_q_next(first_lps);
+
+       while (count && qe) {
+               qe_next = bfa_q_next(qe);
+               lps = (struct bfa_lps_s *)qe;
+               bfa_trc(bfa, lps->bfa_tag);
+               lps->status = first_lps->status;
+               list_del(&lps->qe);
+               list_add_tail(&lps->qe, &mod->lps_active_q);
+               bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+               qe = qe_next;
+               count--;
+       }
+}
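
Illustrative sketch (not part of the patch): bfa_lps_no_res() handles the BFA_STATUS_VPORT_MAX case by failing the logins queued behind the one that got the error, giving them the same status instead of waiting for individual firmware replies. The array-based queue, names and status value below are assumptions made for the example:

/*
 * Fail the next `count` pending logins with the status of the first one.
 */
#include <stdio.h>

struct login {
	int tag;
	int status;	/* 0 = still pending */
	int done;
};

static void login_no_res(struct login *q, int qlen, int first, int count)
{
	int i;

	for (i = first + 1; i < qlen && count > 0; i++, count--) {
		q[i].status = q[first].status;
		q[i].done = 1;
		printf("login tag %d failed with status %d\n",
		       q[i].tag, q[i].status);
	}
}

int main(void)
{
	struct login q[4] = {
		{ .tag = 0 }, { .tag = 1 }, { .tag = 2 }, { .tag = 3 }
	};

	q[0].status = 1;	/* stand-in for VPORT_MAX on the first login */
	q[0].done = 1;
	login_no_res(q, 4, 0, 2);	/* firmware: two more cannot be granted */
	return 0;
}
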
+
 /*
  * Firmware logout response
  */
@@ -1603,8 +1626,8 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
 
-       WARN_ON(rsp->lp_tag >= mod->num_lps);
-       lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
+       WARN_ON(rsp->bfa_tag >= mod->num_lps);
+       lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
 
        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
 }
@@ -1618,7 +1641,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
 
-       lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
+       lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
 
        bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
 }
@@ -1653,23 +1676,27 @@ bfa_lps_free(struct bfa_lps_s *lps)
 static void
 bfa_lps_send_login(struct bfa_lps_s *lps)
 {
+       struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
        struct bfi_lps_login_req_s      *m;
 
        m = bfa_reqq_next(lps->bfa, lps->reqq);
        WARN_ON(!m);
 
        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
-               bfa_lpuid(lps->bfa));
+               bfa_fn_lpu(lps->bfa));
 
-       m->lp_tag       = lps->lp_tag;
+       m->bfa_tag      = lps->bfa_tag;
        m->alpa         = lps->alpa;
        m->pdu_size     = cpu_to_be16(lps->pdusz);
        m->pwwn         = lps->pwwn;
        m->nwwn         = lps->nwwn;
        m->fdisc        = lps->fdisc;
        m->auth_en      = lps->auth_en;
+       m->bb_scn       = lps->bb_scn;
 
-       bfa_reqq_produce(lps->bfa, lps->reqq);
+       bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
+       list_del(&lps->qe);
+       list_add_tail(&lps->qe, &mod->lps_login_q);
 }
 
 /*
@@ -1684,11 +1711,11 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
        WARN_ON(!m);
 
        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
-               bfa_lpuid(lps->bfa));
+               bfa_fn_lpu(lps->bfa));
 
-       m->lp_tag    = lps->lp_tag;
+       m->fw_tag = lps->fw_tag;
        m->port_name = lps->pwwn;
-       bfa_reqq_produce(lps->bfa, lps->reqq);
+       bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
 }
 
 /*
@@ -1703,11 +1730,11 @@ bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
        WARN_ON(!m);
 
        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
-               bfa_lpuid(lps->bfa));
+               bfa_fn_lpu(lps->bfa));
 
-       m->lp_tag = lps->lp_tag;
+       m->fw_tag = lps->fw_tag;
        m->lp_pid = lps->lp_pid;
-       bfa_reqq_produce(lps->bfa, lps->reqq);
+       bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
 }
 
 /*
@@ -1859,7 +1886,7 @@ bfa_lps_delete(struct bfa_lps_s *lps)
  */
 void
 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
-       wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
+       wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
 {
        lps->uarg       = uarg;
        lps->alpa       = alpa;
@@ -1868,6 +1895,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
        lps->nwwn       = nwwn;
        lps->fdisc      = BFA_FALSE;
        lps->auth_en    = auth_en;
+       lps->bb_scn     = bb_scn;
        bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
 }
 
@@ -1898,6 +1926,13 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps)
        bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
 }
 
+u8
+bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
+{
+       struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
+
+       return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
+}
 
 /*
  * Return lport services tag given the pid
@@ -1911,7 +1946,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
 
        for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
                if (lps->lp_pid == pid)
-                       return lps->lp_tag;
+                       return lps->bfa_tag;
        }
 
        /* Return base port tag anyway */
@@ -1936,7 +1971,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa)
 void
 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
 {
-       bfa_trc(lps->bfa, lps->lp_tag);
+       bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, n2n_pid);
 
        lps->lp_pid = n2n_pid;
@@ -1955,15 +1990,15 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        msg.msg = m;
 
        switch (m->mhdr.msg_id) {
-       case BFI_LPS_H2I_LOGIN_RSP:
+       case BFI_LPS_I2H_LOGIN_RSP:
                bfa_lps_login_rsp(bfa, msg.login_rsp);
                break;
 
-       case BFI_LPS_H2I_LOGOUT_RSP:
+       case BFI_LPS_I2H_LOGOUT_RSP:
                bfa_lps_logout_rsp(bfa, msg.logout_rsp);
                break;
 
-       case BFI_LPS_H2I_CVL_EVENT:
+       case BFI_LPS_I2H_CVL_EVENT:
                bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
                break;
 
@@ -1973,6 +2008,24 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        }
 }
 
+static void
+bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
+{
+       struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+       struct bfa_aen_entry_s  *aen_entry;
+
+       bfad_get_aen_entry(bfad, aen_entry);
+       if (!aen_entry)
+               return;
+
+       aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
+       aen_entry->aen_data.port.pwwn = fcport->pwwn;
+
+       /* Send the AEN notification */
+       bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
+                                 BFA_AEN_CAT_PORT, event);
+}
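
Illustrative sketch (not part of the patch): bfa_fcport_aen_post() follows a simple pattern — take an entry from a fixed AEN pool (dropping the event if none is available), fill in the port identity, and post it with an incrementing sequence number. A self-contained approximation, with all structures and names invented for the example:

/*
 * Pool-backed event posting with a monotonically increasing sequence.
 */
#include <stdio.h>

struct aen_entry {
	int in_use;
	int seq;
	int event;
};

static struct aen_entry aen_pool[4];
static int aen_seq;

static struct aen_entry *aen_get(void)
{
	int i;

	for (i = 0; i < 4; i++)
		if (!aen_pool[i].in_use) {
			aen_pool[i].in_use = 1;
			return &aen_pool[i];
		}
	return NULL;			/* pool exhausted: drop the event */
}

static void aen_post(int event)
{
	struct aen_entry *e = aen_get();

	if (!e)
		return;
	e->seq = ++aen_seq;
	e->event = event;
	printf("posted AEN event %d, seq %d\n", e->event, e->seq);
}

int main(void)
{
	aen_post(1);	/* e.g. port online */
	aen_post(2);	/* e.g. port disable */
	return 0;
}
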
+
 /*
  * FC PORT state machine functions
  */
@@ -2061,6 +2114,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
+               bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;
 
        case BFA_FCPORT_SM_LINKUP:
@@ -2121,6 +2175,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
+               bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;
 
        case BFA_FCPORT_SM_STOP:
@@ -2174,6 +2229,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port online: WWN = %s\n", pwwn_buf);
+               bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
+
+               /* If QoS is enabled and it is not online, send AEN */
+               if (fcport->cfg.qos_enabled &&
+                   fcport->qos_attr.state != BFA_QOS_ONLINE)
+                       bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
                break;
 
        case BFA_FCPORT_SM_LINKDOWN:
@@ -2200,6 +2261,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
+               bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;
 
        case BFA_FCPORT_SM_STOP:
@@ -2245,8 +2307,10 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port offline: WWN = %s\n", pwwn_buf);
+               bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
+               bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;
 
        case BFA_FCPORT_SM_LINKDOWN:
@@ -2256,26 +2320,32 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
                wwn2str(pwwn_buf, fcport->pwwn);
-               if (BFA_PORT_IS_DISABLED(fcport->bfa))
+               if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Base port offline: WWN = %s\n", pwwn_buf);
-               else
+                       bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+               } else {
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Base port (WWN = %s) "
                                "lost fabric connectivity\n", pwwn_buf);
+                       bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+               }
                break;
 
        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                bfa_fcport_reset_linkinfo(fcport);
                wwn2str(pwwn_buf, fcport->pwwn);
-               if (BFA_PORT_IS_DISABLED(fcport->bfa))
+               if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Base port offline: WWN = %s\n", pwwn_buf);
-               else
+                       bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+               } else {
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Base port (WWN = %s) "
                                "lost fabric connectivity\n", pwwn_buf);
+                       bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+               }
                break;
 
        case BFA_FCPORT_SM_HWFAIL:
@@ -2283,13 +2353,16 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                wwn2str(pwwn_buf, fcport->pwwn);
-               if (BFA_PORT_IS_DISABLED(fcport->bfa))
+               if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Base port offline: WWN = %s\n", pwwn_buf);
-               else
+                       bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+               } else {
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Base port (WWN = %s) "
                                "lost fabric connectivity\n", pwwn_buf);
+                       bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+               }
                break;
 
        default:
@@ -2420,6 +2493,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port enabled: WWN = %s\n", pwwn_buf);
+               bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
                break;
 
        case BFA_FCPORT_SM_STOP:
@@ -2474,6 +2548,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port enabled: WWN = %s\n", pwwn_buf);
+               bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
                break;
 
        case BFA_FCPORT_SM_DISABLE:
@@ -2777,10 +2852,12 @@ bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
                                                        BFA_CACHELINE_SZ))
 
 static void
-bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-               u32 *dm_len)
+bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+                  struct bfa_s *bfa)
 {
-       *dm_len += FCPORT_STATS_DMA_SZ;
+       struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
+
+       bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
 }
 
 static void
@@ -2792,23 +2869,14 @@ bfa_fcport_qresume(void *cbarg)
 }
 
 static void
-bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
+bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
 {
-       u8              *dm_kva;
-       u64     dm_pa;
-
-       dm_kva = bfa_meminfo_dma_virt(meminfo);
-       dm_pa  = bfa_meminfo_dma_phys(meminfo);
-
-       fcport->stats_kva = dm_kva;
-       fcport->stats_pa  = dm_pa;
-       fcport->stats     = (union bfa_fcport_stats_u *) dm_kva;
+       struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
 
-       dm_kva += FCPORT_STATS_DMA_SZ;
-       dm_pa  += FCPORT_STATS_DMA_SZ;
-
-       bfa_meminfo_dma_virt(meminfo) = dm_kva;
-       bfa_meminfo_dma_phys(meminfo) = dm_pa;
+       fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
+       fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
+       fcport->stats = (union bfa_fcport_stats_u *)
+                               bfa_mem_dma_virt(fcport_dma);
 }
 
 /*
@@ -2816,18 +2884,17 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
  */
 static void
 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-               struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
        struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
        struct bfa_fcport_ln_s *ln = &fcport->ln;
        struct timeval tv;
 
-       memset(fcport, 0, sizeof(struct bfa_fcport_s));
        fcport->bfa = bfa;
        ln->fcport = fcport;
 
-       bfa_fcport_mem_claim(fcport, meminfo);
+       bfa_fcport_mem_claim(fcport);
 
        bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
        bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
@@ -2848,6 +2915,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
        port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
 
+       INIT_LIST_HEAD(&fcport->stats_pending_q);
+       INIT_LIST_HEAD(&fcport->statsclr_pending_q);
+
        bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
 }
 
@@ -2921,6 +2991,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
 {
        fcport->speed = BFA_PORT_SPEED_UNKNOWN;
        fcport->topology = BFA_PORT_TOPOLOGY_NONE;
+       fcport->bbsc_op_state = BFA_FALSE;
 }
 
 /*
@@ -2948,7 +3019,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
-                       bfa_lpuid(fcport->bfa));
+                       bfa_fn_lpu(fcport->bfa));
        m->nwwn = fcport->nwwn;
        m->pwwn = fcport->pwwn;
        m->port_cfg = fcport->cfg;
@@ -2962,7 +3033,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
        return BFA_TRUE;
 }
 
@@ -2991,13 +3062,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
-                       bfa_lpuid(fcport->bfa));
+                       bfa_fn_lpu(fcport->bfa));
        m->msgtag = fcport->msgtag;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
 
        return BFA_TRUE;
 }
@@ -3029,13 +3100,14 @@ bfa_fcport_send_txcredit(void *port_cbarg)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
-                       bfa_lpuid(fcport->bfa));
+                       bfa_fn_lpu(fcport->bfa));
        m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
+       m->bb_scn = fcport->cfg.bb_scn;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
 }
 
 static void
@@ -3074,30 +3146,38 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
 static void
 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
 {
-       struct bfa_fcport_s *fcport = cbarg;
+       struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
+       struct bfa_cb_pending_q_s *cb;
+       struct list_head *qe, *qen;
+       union bfa_fcport_stats_u *ret;
 
        if (complete) {
-               if (fcport->stats_status == BFA_STATUS_OK) {
-                       struct timeval tv;
-
-                       /* Swap FC QoS or FCoE stats */
-                       if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
-                               bfa_fcport_qos_stats_swap(
-                                       &fcport->stats_ret->fcqos,
-                                       &fcport->stats->fcqos);
-                       } else {
-                               bfa_fcport_fcoe_stats_swap(
-                                       &fcport->stats_ret->fcoe,
-                                       &fcport->stats->fcoe);
-
-                               do_gettimeofday(&tv);
-                               fcport->stats_ret->fcoe.secs_reset =
+               struct timeval tv;
+               if (fcport->stats_status == BFA_STATUS_OK)
+                       do_gettimeofday(&tv);
+
+               list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
+                       bfa_q_deq(&fcport->stats_pending_q, &qe);
+                       cb = (struct bfa_cb_pending_q_s *)qe;
+                       if (fcport->stats_status == BFA_STATUS_OK) {
+                               ret = (union bfa_fcport_stats_u *)cb->data;
+                               /* Swap FC QoS or FCoE stats */
+                               if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
+                                       bfa_fcport_qos_stats_swap(&ret->fcqos,
+                                                       &fcport->stats->fcqos);
+                               else {
+                                       bfa_fcport_fcoe_stats_swap(&ret->fcoe,
+                                                       &fcport->stats->fcoe);
+                                       ret->fcoe.secs_reset =
                                        tv.tv_sec - fcport->stats_reset_time;
+                               }
                        }
+                       bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+                                       fcport->stats_status);
                }
-               fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+               fcport->stats_status = BFA_STATUS_OK;
        } else {
-               fcport->stats_busy = BFA_FALSE;
+               INIT_LIST_HEAD(&fcport->stats_pending_q);
                fcport->stats_status = BFA_STATUS_OK;
        }
 }
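
Illustrative sketch (not part of the patch): the stats path above replaces the single stats_busy flag and one callback with the stats_pending_q/statsclr_pending_q lists — callers queue their completion callbacks, only the first caller actually sends a firmware request, and one response (or timeout) drains the whole queue with a shared status. A small standalone model of that pattern; the fixed-size ring and every name are assumptions made for this example:

/*
 * Pending-queue completion: first waiter triggers the request,
 * one completion services every queued waiter.
 */
#include <stdio.h>

typedef void (*stats_cb_t)(void *arg, int status);

struct pending_q {
	stats_cb_t cb[16];
	void	  *arg[16];
	int	   count;
};

static void send_stats_request(void)
{
	printf("-> stats request sent to firmware\n");	/* stand-in for the mailbox send */
}

static int stats_get(struct pending_q *q, stats_cb_t cb, void *arg)
{
	if (q->count == 16)
		return -1;		/* queue full; caller must retry */
	q->cb[q->count] = cb;
	q->arg[q->count] = arg;
	if (q->count++ == 0)		/* only the first waiter triggers the send */
		send_stats_request();
	return 0;
}

static void stats_complete(struct pending_q *q, int status)
{
	int i;

	for (i = 0; i < q->count; i++)	/* drain: every waiter sees the same status */
		q->cb[i](q->arg[i], status);
	q->count = 0;
}

static void my_cb(void *arg, int status)
{
	printf("waiter %s completed, status %d\n", (const char *)arg, status);
}

int main(void)
{
	struct pending_q q = { .count = 0 };

	stats_get(&q, my_cb, "A");	/* triggers the firmware request */
	stats_get(&q, my_cb, "B");	/* queues behind A */
	stats_complete(&q, 0);		/* completes both with status 0 */
	return 0;
}
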
@@ -3115,8 +3195,7 @@ bfa_fcport_stats_get_timeout(void *cbarg)
        }
 
        fcport->stats_status = BFA_STATUS_ETIMER;
-       bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
-               fcport);
+       __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
 }
 
 static void
@@ -3139,14 +3218,16 @@ bfa_fcport_send_stats_get(void *cbarg)
 
        memset(msg, 0, sizeof(struct bfi_fcport_req_s));
        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
-                       bfa_lpuid(fcport->bfa));
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+                       bfa_fn_lpu(fcport->bfa));
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
 }
 
 static void
 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
 {
-       struct bfa_fcport_s *fcport = cbarg;
+       struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+       struct bfa_cb_pending_q_s *cb;
+       struct list_head *qe, *qen;
 
        if (complete) {
                struct timeval tv;
@@ -3156,10 +3237,15 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
                 */
                do_gettimeofday(&tv);
                fcport->stats_reset_time = tv.tv_sec;
-
-               fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+               list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
+                       bfa_q_deq(&fcport->statsclr_pending_q, &qe);
+                       cb = (struct bfa_cb_pending_q_s *)qe;
+                       bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+                                               fcport->stats_status);
+               }
+               fcport->stats_status = BFA_STATUS_OK;
        } else {
-               fcport->stats_busy = BFA_FALSE;
+               INIT_LIST_HEAD(&fcport->statsclr_pending_q);
                fcport->stats_status = BFA_STATUS_OK;
        }
 }
@@ -3177,8 +3263,7 @@ bfa_fcport_stats_clr_timeout(void *cbarg)
        }
 
        fcport->stats_status = BFA_STATUS_ETIMER;
-       bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
-                       __bfa_cb_fcport_stats_clr, fcport);
+       __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
 }
 
 static void
@@ -3201,8 +3286,8 @@ bfa_fcport_send_stats_clear(void *cbarg)
 
        memset(msg, 0, sizeof(struct bfi_fcport_req_s));
        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
-                       bfa_lpuid(fcport->bfa));
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+                       bfa_fn_lpu(fcport->bfa));
+       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
 }
 
 /*
@@ -3329,6 +3414,9 @@ bfa_fcport_init(struct bfa_s *bfa)
        fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
        fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
 
+       if (bfa_fcport_is_pbcdisabled(bfa))
+               bfa->modules.port.pbc_disabled = BFA_TRUE;
+
        WARN_ON(!fcport->cfg.maxfrsize);
        WARN_ON(!fcport->cfg.rx_bbcredit);
        WARN_ON(!fcport->speed_sup);
@@ -3371,6 +3459,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
                                fcport->use_flash_cfg = BFA_FALSE;
                        }
 
+                       if (fcport->cfg.qos_enabled)
+                               fcport->qos_attr.state = BFA_QOS_OFFLINE;
+                       else
+                               fcport->qos_attr.state = BFA_QOS_DISABLED;
+
                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
                }
                break;
@@ -3395,28 +3488,26 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
                /*
                 * check for timer pop before processing the rsp
                 */
-               if (fcport->stats_busy == BFA_FALSE ||
-                   fcport->stats_status == BFA_STATUS_ETIMER)
+               if (list_empty(&fcport->stats_pending_q) ||
+                   (fcport->stats_status == BFA_STATUS_ETIMER))
                        break;
 
                bfa_timer_stop(&fcport->timer);
                fcport->stats_status = i2hmsg.pstatsget_rsp->status;
-               bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
-                               __bfa_cb_fcport_stats_get, fcport);
+               __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
                break;
 
        case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
                /*
                 * check for timer pop before processing the rsp
                 */
-               if (fcport->stats_busy == BFA_FALSE ||
-                   fcport->stats_status == BFA_STATUS_ETIMER)
+               if (list_empty(&fcport->statsclr_pending_q) ||
+                   (fcport->stats_status == BFA_STATUS_ETIMER))
                        break;
 
                bfa_timer_stop(&fcport->timer);
                fcport->stats_status = BFA_STATUS_OK;
-               bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
-                               __bfa_cb_fcport_stats_clr, fcport);
+               __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
                break;
 
        case BFI_FCPORT_I2H_ENABLE_AEN:
@@ -3453,6 +3544,9 @@ bfa_fcport_enable(struct bfa_s *bfa)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
+       if (bfa_fcport_is_pbcdisabled(bfa))
+               return BFA_STATUS_PBC;
+
        if (bfa_ioc_is_disabled(&bfa->ioc))
                return BFA_STATUS_IOC_DISABLED;
 
@@ -3466,6 +3560,8 @@ bfa_fcport_enable(struct bfa_s *bfa)
 bfa_status_t
 bfa_fcport_disable(struct bfa_s *bfa)
 {
+       if (bfa_fcport_is_pbcdisabled(bfa))
+               return BFA_STATUS_PBC;
 
        if (bfa_ioc_is_disabled(&bfa->ioc))
                return BFA_STATUS_IOC_DISABLED;
@@ -3474,6 +3570,21 @@ bfa_fcport_disable(struct bfa_s *bfa)
        return BFA_STATUS_OK;
 }
 
+/* If PBC is disabled on port, return error */
+bfa_status_t
+bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+       struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+       struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+       if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
+               bfa_trc(bfa, fcport->pwwn);
+               return BFA_STATUS_PBC;
+       }
+       return BFA_STATUS_OK;
+}
+
 /*
  * Configure port speed.
  */
@@ -3491,6 +3602,28 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
                return BFA_STATUS_UNSUPP_SPEED;
        }
 
+       /* For Mezz card, port speed entered needs to be checked */
+       if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
+               if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
+                       /* For CT2, 1G is not supported */
+                       if ((speed == BFA_PORT_SPEED_1GBPS) &&
+                           (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+                               return BFA_STATUS_UNSUPP_SPEED;
+
+                       /* Already checked for Auto Speed and Max Speed supp */
+                       if (!(speed == BFA_PORT_SPEED_1GBPS ||
+                             speed == BFA_PORT_SPEED_2GBPS ||
+                             speed == BFA_PORT_SPEED_4GBPS ||
+                             speed == BFA_PORT_SPEED_8GBPS ||
+                             speed == BFA_PORT_SPEED_16GBPS ||
+                             speed == BFA_PORT_SPEED_AUTO))
+                               return BFA_STATUS_UNSUPP_SPEED;
+               } else {
+                       if (speed != BFA_PORT_SPEED_10GBPS)
+                               return BFA_STATUS_UNSUPP_SPEED;
+               }
+       }
+
        fcport->cfg.speed = speed;
 
        return BFA_STATUS_OK;
@@ -3624,11 +3757,14 @@ bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
 }
 
 void
-bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
+bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
        fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
+       fcport->cfg.bb_scn = bb_scn;
+       if (bb_scn)
+               fcport->bbsc_op_state = BFA_TRUE;
        bfa_fcport_send_txcredit(fcport);
 }
 
@@ -3675,16 +3811,23 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
        /* beacon attributes */
        attr->beacon = fcport->beacon;
        attr->link_e2e_beacon = fcport->link_e2e_beacon;
-       attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
-       attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
 
        attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
        attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
        attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
-       if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
-               attr->port_state = BFA_PORT_ST_IOCDIS;
-       else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
-               attr->port_state = BFA_PORT_ST_FWMISMATCH;
+       attr->bbsc_op_status =  fcport->bbsc_op_state;
+
+       /* PBC Disabled State */
+       if (bfa_fcport_is_pbcdisabled(bfa))
+               attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
+       else {
+               if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
+                       attr->port_state = BFA_PORT_ST_IOCDIS;
+               else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
+                       attr->port_state = BFA_PORT_ST_FWMISMATCH;
+               else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
+                       attr->port_state = BFA_PORT_ST_ACQ_ADDR;
+       }
 
        /* FCoE vlan */
        attr->fcoe_vlan = fcport->fcoe_vlan;
@@ -3696,25 +3839,25 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
  * Fetch port statistics (FCQoS or FCoE).
  */
 bfa_status_t
-bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
-       bfa_cb_port_t cbfn, void *cbarg)
+bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-       if (fcport->stats_busy) {
-               bfa_trc(bfa, fcport->stats_busy);
-               return BFA_STATUS_DEVBUSY;
-       }
+       if (bfa_ioc_is_disabled(&bfa->ioc))
+               return BFA_STATUS_IOC_DISABLED;
 
-       fcport->stats_busy  = BFA_TRUE;
-       fcport->stats_ret   = stats;
-       fcport->stats_cbfn  = cbfn;
-       fcport->stats_cbarg = cbarg;
+       if (!list_empty(&fcport->statsclr_pending_q))
+               return BFA_STATUS_DEVBUSY;
 
-       bfa_fcport_send_stats_get(fcport);
+       if (list_empty(&fcport->stats_pending_q)) {
+               list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
+               bfa_fcport_send_stats_get(fcport);
+               bfa_timer_start(bfa, &fcport->timer,
+                               bfa_fcport_stats_get_timeout,
+                               fcport, BFA_FCPORT_STATS_TOV);
+       } else
+               list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
 
-       bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
-                       fcport, BFA_FCPORT_STATS_TOV);
        return BFA_STATUS_OK;
 }
 
@@ -3722,27 +3865,25 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
  * Reset port statistics (FCQoS or FCoE).
  */
 bfa_status_t
-bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-       if (fcport->stats_busy) {
-               bfa_trc(bfa, fcport->stats_busy);
+       if (!list_empty(&fcport->stats_pending_q))
                return BFA_STATUS_DEVBUSY;
-       }
-
-       fcport->stats_busy  = BFA_TRUE;
-       fcport->stats_cbfn  = cbfn;
-       fcport->stats_cbarg = cbarg;
 
-       bfa_fcport_send_stats_clear(fcport);
+       if (list_empty(&fcport->statsclr_pending_q)) {
+               list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
+               bfa_fcport_send_stats_clear(fcport);
+               bfa_timer_start(bfa, &fcport->timer,
+                               bfa_fcport_stats_clr_timeout,
+                               fcport, BFA_FCPORT_STATS_TOV);
+       } else
+               list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
 
-       bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
-                       fcport, BFA_FCPORT_STATS_TOV);
        return BFA_STATUS_OK;
 }
 
-
 /*
  * Fetch port attributes.
  */
@@ -3765,6 +3906,18 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa)
 
 }
 
+/*
+ *     Enable/Disable FAA feature in port config
+ */
+void
+bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+       bfa_trc(bfa, state);
+       fcport->cfg.faa_state = state;
+}
+
 /*
  * Get default minimum ratelim speed
  */
@@ -3778,6 +3931,22 @@ bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
 
 }
 
+void
+bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+                 bfa_boolean_t link_e2e_beacon)
+{
+       struct bfa_s *bfa = dev;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+       bfa_trc(bfa, beacon);
+       bfa_trc(bfa, link_e2e_beacon);
+       bfa_trc(bfa, fcport->beacon);
+       bfa_trc(bfa, fcport->link_e2e_beacon);
+
+       fcport->beacon = beacon;
+       fcport->link_e2e_beacon = link_e2e_beacon;
+}
+
 bfa_boolean_t
 bfa_fcport_is_linkup(struct bfa_s *bfa)
 {
@@ -3797,6 +3966,14 @@ bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
        return fcport->cfg.qos_enabled;
 }
 
+bfa_boolean_t
+bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+       return fcport->cfg.trunked;
+}
+
 /*
  * Rport State machine functions
  */
@@ -4286,18 +4463,22 @@ bfa_rport_qresume(void *cbarg)
 }
 
 static void
-bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-               u32 *dm_len)
+bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
+       struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
+
        if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
                cfg->fwcfg.num_rports = BFA_RPORT_MIN;
 
-       *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, rport_kva,
+               cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
 }
 
 static void
 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-                    struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
        struct bfa_rport_s *rp;
@@ -4305,8 +4486,9 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
        INIT_LIST_HEAD(&mod->rp_free_q);
        INIT_LIST_HEAD(&mod->rp_active_q);
+       INIT_LIST_HEAD(&mod->rp_unused_q);
 
-       rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
+       rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
        mod->rps_list = rp;
        mod->num_rports = cfg->fwcfg.num_rports;
 
@@ -4331,7 +4513,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        /*
         * consume memory
         */
-       bfa_meminfo_kva(meminfo) = (u8 *) rp;
+       bfa_mem_kva_curp(mod) = (u8 *) rp;
 }
 
 static void
@@ -4356,6 +4538,9 @@ bfa_rport_iocdisable(struct bfa_s *bfa)
        struct bfa_rport_s *rport;
        struct list_head *qe, *qen;
 
+       /* Enqueue unused rport resources to free_q */
+       list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
+
        list_for_each_safe(qe, qen, &mod->rp_active_q) {
                rport = (struct bfa_rport_s *) qe;
                bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
@@ -4399,11 +4584,11 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
-                       bfa_lpuid(rp->bfa));
+                       bfa_fn_lpu(rp->bfa));
        m->bfa_handle = rp->rport_tag;
        m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
        m->pid = rp->rport_info.pid;
-       m->lp_tag = rp->rport_info.lp_tag;
+       m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
        m->local_pid = rp->rport_info.local_pid;
        m->fc_class = rp->rport_info.fc_class;
        m->vf_en = rp->rport_info.vf_en;
@@ -4413,7 +4598,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
 }
 
@@ -4432,13 +4617,13 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
-                       bfa_lpuid(rp->bfa));
+                       bfa_fn_lpu(rp->bfa));
        m->fw_handle = rp->fw_handle;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
 }
 
@@ -4457,14 +4642,14 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
        }
 
        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
-                       bfa_lpuid(rp->bfa));
+                       bfa_fn_lpu(rp->bfa));
        m->fw_handle = rp->fw_handle;
        m->speed = (u8)rp->rport_info.speed;
 
        /*
         * queue I/O message to firmware
         */
-       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+       bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
 }
 
@@ -4492,6 +4677,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
                rp->fw_handle = msg.create_rsp->fw_handle;
                rp->qos_attr = msg.create_rsp->qos_attr;
+               bfa_rport_set_lunmask(bfa, rp);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
                break;
@@ -4499,6 +4685,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        case BFI_RPORT_I2H_DELETE_RSP:
                rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
+               bfa_rport_unset_lunmask(bfa, rp);
                bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
                break;
 
@@ -4514,7 +4701,18 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        }
 }
 
+void
+bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
+{
+       struct bfa_rport_mod_s  *mod = BFA_RPORT_MOD(bfa);
+       struct list_head        *qe;
+       int     i;
 
+       for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
+               bfa_q_deq_tail(&mod->rp_free_q, &qe);
+               list_add_tail(qe, &mod->rp_unused_q);
+       }
+}
 
 /*
  *  bfa_rport_api
@@ -4568,6 +4766,37 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
        bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
 }
 
+/* Set Rport LUN Mask */
+void
+bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+       struct bfa_lps_mod_s    *lps_mod = BFA_LPS_MOD(bfa);
+       wwn_t   lp_wwn, rp_wwn;
+       u8 lp_tag = (u8)rp->rport_info.lp_tag;
+
+       rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+       lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+       BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+                                       rp->lun_mask = BFA_TRUE;
+       bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
+}
+
+/* Unset Rport LUN mask */
+void
+bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+       struct bfa_lps_mod_s    *lps_mod = BFA_LPS_MOD(bfa);
+       wwn_t   lp_wwn, rp_wwn;
+
+       rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+       lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+       BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+                               rp->lun_mask = BFA_FALSE;
+       bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
+                       BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
+}
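
The two helpers above record LUN masking on both sides of the association (the lport's bfa_lps_s entry and the rport itself) and then inform the FCP initiator module via bfa_fcpim_lunmask_rp_update(); the rport ISR pairs them with the firmware create and delete responses. A hypothetical caller-side check using bfa_rport_lunmask_active(), which this patch declares later; apply_lun_policy() is a placeholder:

	static void
	example_expose_luns(struct bfa_rport_s *rp)
	{
		/* sketch only: apply per-LUN filtering when the mask is active */
		if (bfa_rport_lunmask_active(rp))
			apply_lun_policy(rp);	/* placeholder helper */
	}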
 
 /*
  * SGPG related functions
@@ -4577,26 +4806,51 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
  * Compute and return memory needed by FCP(im) module.
  */
 static void
-bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-               u32 *dm_len)
+bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
+       struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
+       struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     nsegs, idx, per_seg_sgpg, num_sgpg;
+       u32     sgpg_sz = sizeof(struct bfi_sgpg_s);
+
        if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
                cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
+       else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
+               cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
 
-       *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
-       *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
-}
+       num_sgpg = cfg->drvcfg.num_sgpgs;
+
+       nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+       per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
+
+       bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
+               if (num_sgpg >= per_seg_sgpg) {
+                       num_sgpg -= per_seg_sgpg;
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                                       per_seg_sgpg * sgpg_sz);
+               } else
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                                       num_sgpg * sgpg_sz);
+       }
 
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, sgpg_kva,
+               cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
+}
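
Under the per-module meminfo scheme each module sizes its own DMA segments: BFI_MEM_DMA_NSEGS() yields the number of segments needed and BFI_MEM_NREQS_SEG() the number of elements that fit in one segment; each segment is then registered with bfa_mem_dma_setup() and the driver-side array with bfa_mem_kva_setup(). The per-segment byte count reduces to the rule sketched below, where elem_sz and per_seg_elems stand for any module's element size and per-segment capacity (a generic restatement, not code from the patch):

	static u32
	example_seg_bytes(u32 remaining_elems, u32 per_seg_elems, u32 elem_sz)
	{
		/* a segment carries at most per_seg_elems elements */
		u32 n = (remaining_elems >= per_seg_elems) ? per_seg_elems
							   : remaining_elems;
		return n * elem_sz;	/* bytes requested for this segment */
	}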
 
 static void
 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-                   struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
-       int i;
        struct bfa_sgpg_s *hsgpg;
        struct bfi_sgpg_s *sgpg;
        u64 align_len;
+       struct bfa_mem_dma_s *seg_ptr;
+       u32     sgpg_sz = sizeof(struct bfi_sgpg_s);
+       u16     i, idx, nsegs, per_seg_sgpg, num_sgpg;
 
        union {
                u64 pa;
@@ -4608,39 +4862,45 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
        bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
 
-       mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
-       mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
-       align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
-       mod->sgpg_arr_pa += align_len;
-       mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
-                                               align_len);
-       mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
-                                               align_len);
-
-       hsgpg = mod->hsgpg_arr;
-       sgpg = mod->sgpg_arr;
-       sgpg_pa.pa = mod->sgpg_arr_pa;
-       mod->free_sgpgs = mod->num_sgpgs;
-
-       WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1));
-
-       for (i = 0; i < mod->num_sgpgs; i++) {
-               memset(hsgpg, 0, sizeof(*hsgpg));
-               memset(sgpg, 0, sizeof(*sgpg));
-
-               hsgpg->sgpg = sgpg;
-               sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
-               hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
-               list_add_tail(&hsgpg->qe, &mod->sgpg_q);
-
-               hsgpg++;
-               sgpg++;
-               sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
+       mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
+
+       num_sgpg = cfg->drvcfg.num_sgpgs;
+       nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+
+       /* dma/kva mem claim */
+       hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
+
+       bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
+
+               if (!bfa_mem_dma_virt(seg_ptr))
+                       break;
+
+               align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
+                                            bfa_mem_dma_phys(seg_ptr);
+
+               sgpg = (struct bfi_sgpg_s *)
+                       (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
+               sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
+               WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
+
+               per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
+
+               for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
+                       memset(hsgpg, 0, sizeof(*hsgpg));
+                       memset(sgpg, 0, sizeof(*sgpg));
+
+                       hsgpg->sgpg = sgpg;
+                       sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
+                       hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
+                       list_add_tail(&hsgpg->qe, &mod->sgpg_q);
+
+                       sgpg++;
+                       hsgpg++;
+                       sgpg_pa.pa += sgpg_sz;
+               }
        }
 
-       bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
-       bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
-       bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
+       bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
 }
 
 static void
@@ -4782,31 +5042,13 @@ __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
 }
 
 static void
-claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
-{
-       u32 uf_pb_tot_sz;
-
-       ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
-       ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
-       uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
-                                                       BFA_DMA_ALIGN_SZ);
-
-       bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
-       bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
-
-       memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
-}
-
-static void
-claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
 {
        struct bfi_uf_buf_post_s *uf_bp_msg;
-       struct bfi_sge_s      *sge;
-       union bfi_addr_u      sga_zero = { {0} };
        u16 i;
        u16 buf_len;
 
-       ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
+       ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
        uf_bp_msg = ufm->uf_buf_posts;
 
        for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
@@ -4817,28 +5059,18 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
                buf_len = sizeof(struct bfa_uf_buf_s);
                uf_bp_msg->buf_len = cpu_to_be16(buf_len);
                bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
-                           bfa_lpuid(ufm->bfa));
-
-               sge = uf_bp_msg->sge;
-               sge[0].sg_len = buf_len;
-               sge[0].flags = BFI_SGE_DATA_LAST;
-               bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
-               bfa_sge_to_be(sge);
-
-               sge[1].sg_len = buf_len;
-               sge[1].flags = BFI_SGE_PGDLEN;
-               sge[1].sga = sga_zero;
-               bfa_sge_to_be(&sge[1]);
+                           bfa_fn_lpu(ufm->bfa));
+               bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
        }
 
        /*
         * advance pointer beyond consumed memory
         */
-       bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
+       bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
 }
 
 static void
-claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+claim_ufs(struct bfa_uf_mod_s *ufm)
 {
        u16 i;
        struct bfa_uf_s   *uf;
@@ -4846,7 +5078,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
        /*
         * Claim block of memory for UF list
         */
-       ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
+       ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
 
        /*
         * Initialize UFs and queue it in UF free queue
@@ -4855,8 +5087,8 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
                memset(uf, 0, sizeof(struct bfa_uf_s));
                uf->bfa = ufm->bfa;
                uf->uf_tag = i;
-               uf->pb_len = sizeof(struct bfa_uf_buf_s);
-               uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
+               uf->pb_len = BFA_PER_UF_DMA_SZ;
+               uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
                uf->buf_pa = ufm_pbs_pa(ufm, i);
                list_add_tail(&uf->qe, &ufm->uf_free_q);
        }
@@ -4864,48 +5096,57 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
        /*
         * advance memory pointer
         */
-       bfa_meminfo_kva(mi) = (u8 *) uf;
+       bfa_mem_kva_curp(ufm) = (u8 *) uf;
 }
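
UF receive buffers no longer come from a single contiguous uf_pbs array; each buffer is located by its tag at a fixed BFA_PER_UF_DMA_SZ stride inside the segmented DMA pool. A minimal sketch of resolving the kva/pa pair for a tag (the wrapper function and the u64 typing of the physical address are illustrative assumptions):

	static void
	example_uf_buf_addr(struct bfa_uf_mod_s *ufm, u16 tag)
	{
		void	*kva = bfa_mem_get_dmabuf_kva(ufm, tag, BFA_PER_UF_DMA_SZ);
		u64	pa   = bfa_mem_get_dmabuf_pa(ufm, tag, BFA_PER_UF_DMA_SZ);

		/* kva and pa name the same per-tag buffer within the segments */
		(void)kva;
		(void)pa;
	}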
 
 static void
-uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+uf_mem_claim(struct bfa_uf_mod_s *ufm)
 {
-       claim_uf_pbs(ufm, mi);
-       claim_ufs(ufm, mi);
-       claim_uf_post_msgs(ufm, mi);
+       claim_ufs(ufm);
+       claim_uf_post_msgs(ufm);
 }
 
 static void
-bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
+bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+               struct bfa_s *bfa)
 {
-       u32 num_ufs = cfg->fwcfg.num_uf_bufs;
-
-       /*
-        * dma-able memory for UF posted bufs
-        */
-       *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
-                                                       BFA_DMA_ALIGN_SZ);
+       struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+       struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
+       u32     num_ufs = cfg->fwcfg.num_uf_bufs;
+       struct bfa_mem_dma_s *seg_ptr;
+       u16     nsegs, idx, per_seg_uf = 0;
+
+       nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
+       per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
+
+       bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
+               if (num_ufs >= per_seg_uf) {
+                       num_ufs -= per_seg_uf;
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               per_seg_uf * BFA_PER_UF_DMA_SZ);
+               } else
+                       bfa_mem_dma_setup(minfo, seg_ptr,
+                               num_ufs * BFA_PER_UF_DMA_SZ);
+       }
 
-       /*
-        * kernel Virtual memory for UFs and UF buf post msg copies
-        */
-       *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
-       *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
+       /* kva memory */
+       bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
+               (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
 }
 
 static void
 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-                 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+               struct bfa_pcidev_s *pcidev)
 {
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
 
-       memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
        ufm->bfa = bfa;
        ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
        INIT_LIST_HEAD(&ufm->uf_free_q);
        INIT_LIST_HEAD(&ufm->uf_posted_q);
+       INIT_LIST_HEAD(&ufm->uf_unused_q);
 
-       uf_mem_claim(ufm, meminfo);
+       uf_mem_claim(ufm);
 }
 
 static void
@@ -4939,7 +5180,7 @@ bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
 
        memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
                      sizeof(struct bfi_uf_buf_post_s));
-       bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
+       bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
 
        bfa_trc(ufm->bfa, uf->uf_tag);
 
@@ -4963,11 +5204,15 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
 {
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
        u16 uf_tag = m->buf_tag;
-       struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
        struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
-       u8 *buf = &uf_buf->d[0];
+       struct bfa_uf_buf_s *uf_buf;
+       uint8_t *buf;
        struct fchs_s *fchs;
 
+       uf_buf = (struct bfa_uf_buf_s *)
+                       bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
+       buf = &uf_buf->d[0];
+
        m->frm_len = be16_to_cpu(m->frm_len);
        m->xfr_len = be16_to_cpu(m->xfr_len);
 
@@ -5008,6 +5253,9 @@ bfa_uf_iocdisable(struct bfa_s *bfa)
        struct bfa_uf_s *uf;
        struct list_head *qe, *qen;
 
+       /* Enqueue unused uf resources to free_q */
+       list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
+
        list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
                uf = (struct bfa_uf_s *) qe;
                list_del(&uf->qe);
@@ -5072,4 +5320,433 @@ bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
        }
 }
 
+void
+bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
+{
+       struct bfa_uf_mod_s     *mod = BFA_UF_MOD(bfa);
+       struct list_head        *qe;
+       int     i;
+
+       for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
+               bfa_q_deq_tail(&mod->uf_free_q, &qe);
+               list_add_tail(qe, &mod->uf_unused_q);
+       }
+}
+
+/*
+ *     BFA fcdiag module
+ */
+#define BFA_DIAG_QTEST_TOV     1000    /* msec */
+
+/*
+ *     Set port status to busy
+ */
+static void
+bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
+{
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
+
+       if (fcdiag->lb.lock)
+               fcport->diag_busy = BFA_TRUE;
+       else
+               fcport->diag_busy = BFA_FALSE;
+}
+
+static void
+bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+               struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+               struct bfa_pcidev_s *pcidev)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+       fcdiag->bfa             = bfa;
+       fcdiag->trcmod  = bfa->trcmod;
+       /* The common DIAG attach, bfa_diag_attach(), claims all required memory */
+}
+
+static void
+bfa_fcdiag_iocdisable(struct bfa_s *bfa)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+       bfa_trc(fcdiag, fcdiag->lb.lock);
+       if (fcdiag->lb.lock) {
+               fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
+               fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+               fcdiag->lb.lock = 0;
+               bfa_fcdiag_set_busy_status(fcdiag);
+       }
+}
+
+static void
+bfa_fcdiag_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_queuetest_timeout(void *cbarg)
+{
+       struct bfa_fcdiag_s       *fcdiag = cbarg;
+       struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+
+       bfa_trc(fcdiag, fcdiag->qtest.all);
+       bfa_trc(fcdiag, fcdiag->qtest.count);
+
+       fcdiag->qtest.timer_active = 0;
+
+       res->status = BFA_STATUS_ETIMER;
+       res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+       if (fcdiag->qtest.all)
+               res->queue  = fcdiag->qtest.all;
+
+       bfa_trc(fcdiag, BFA_STATUS_ETIMER);
+       fcdiag->qtest.status = BFA_STATUS_ETIMER;
+       fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+       fcdiag->qtest.lock = 0;
+}
+
+static bfa_status_t
+bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
+{
+       u32     i;
+       struct bfi_diag_qtest_req_s *req;
+
+       req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
+       if (!req)
+               return BFA_STATUS_DEVBUSY;
+
+       /* build host command */
+       bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
+               bfa_fn_lpu(fcdiag->bfa));
+
+       for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
+               req->data[i] = QTEST_PAT_DEFAULT;
+
+       bfa_trc(fcdiag, fcdiag->qtest.queue);
+       /* ring door bell */
+       bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
+       return BFA_STATUS_OK;
+}
+
+static void
+bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
+                       bfi_diag_qtest_rsp_t *rsp)
+{
+       struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+       bfa_status_t status = BFA_STATUS_OK;
+       int i;
+
+       /* Check timer, should still be active   */
+       if (!fcdiag->qtest.timer_active) {
+               bfa_trc(fcdiag, fcdiag->qtest.timer_active);
+               return;
+       }
+
+       /* update count */
+       fcdiag->qtest.count--;
+
+       /* Check result */
+       for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
+               if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
+                       res->status = BFA_STATUS_DATACORRUPTED;
+                       break;
+               }
+       }
+
+       if (res->status == BFA_STATUS_OK) {
+               if (fcdiag->qtest.count > 0) {
+                       status = bfa_fcdiag_queuetest_send(fcdiag);
+                       if (status == BFA_STATUS_OK)
+                               return;
+                       else
+                               res->status = status;
+               } else if (fcdiag->qtest.all > 0 &&
+                       fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
+                       fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+                       fcdiag->qtest.queue++;
+                       status = bfa_fcdiag_queuetest_send(fcdiag);
+                       if (status == BFA_STATUS_OK)
+                               return;
+                       else
+                               res->status = status;
+               }
+       }
+
+       /* Stop the timer once all queues have been completed */
+       if (fcdiag->qtest.timer_active) {
+               bfa_timer_stop(&fcdiag->qtest.timer);
+               fcdiag->qtest.timer_active = 0;
+       }
+       res->queue = fcdiag->qtest.queue;
+       res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+       bfa_trc(fcdiag, res->count);
+       bfa_trc(fcdiag, res->status);
+       fcdiag->qtest.status = res->status;
+       fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+       fcdiag->qtest.lock = 0;
+}
+
+static void
+bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
+                       struct bfi_diag_lb_rsp_s *rsp)
+{
+       struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
+
+       res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
+       res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
+       res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
+       res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
+       res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
+       res->status     = rsp->res.status;
+       fcdiag->lb.status = rsp->res.status;
+       bfa_trc(fcdiag, fcdiag->lb.status);
+       fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+       fcdiag->lb.lock = 0;
+       bfa_fcdiag_set_busy_status(fcdiag);
+}
+
+static bfa_status_t
+bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
+                       struct bfa_diag_loopback_s *loopback)
+{
+       struct bfi_diag_lb_req_s *lb_req;
+
+       lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
+       if (!lb_req)
+               return BFA_STATUS_DEVBUSY;
+
+       /* build host command */
+       bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
+               bfa_fn_lpu(fcdiag->bfa));
+
+       lb_req->lb_mode = loopback->lb_mode;
+       lb_req->speed = loopback->speed;
+       lb_req->loopcnt = loopback->loopcnt;
+       lb_req->pattern = loopback->pattern;
+
+       /* ring door bell */
+       bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
+
+       bfa_trc(fcdiag, loopback->lb_mode);
+       bfa_trc(fcdiag, loopback->speed);
+       bfa_trc(fcdiag, loopback->loopcnt);
+       bfa_trc(fcdiag, loopback->pattern);
+       return BFA_STATUS_OK;
+}
+
+/*
+ *     cpe/rme intr handler
+ */
+void
+bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+       switch (msg->mhdr.msg_id) {
+       case BFI_DIAG_I2H_LOOPBACK:
+               bfa_fcdiag_loopback_comp(fcdiag,
+                               (struct bfi_diag_lb_rsp_s *) msg);
+               break;
+       case BFI_DIAG_I2H_QTEST:
+               bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
+               break;
+       default:
+               bfa_trc(fcdiag, msg->mhdr.msg_id);
+               WARN_ON(1);
+       }
+}
+
+/*
+ *     Loopback test
+ *
+ *   @param[in] *bfa            - bfa data struct
+ *   @param[in] opmode          - port operation mode
+ *   @param[in] speed           - port speed
+ *   @param[in] lpcnt           - loop count
+ *   @param[in] pat                     - pattern to build packet
+ *   @param[in] *result         - pointer to bfa_diag_loopback_result_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] cbarg           - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
+               enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+               struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
+               void *cbarg)
+{
+       struct  bfa_diag_loopback_s loopback;
+       struct bfa_port_attr_s attr;
+       bfa_status_t status;
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+       if (!bfa_iocfc_is_operational(bfa))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* if port is PBC disabled, return error */
+       if (bfa_fcport_is_pbcdisabled(bfa)) {
+               bfa_trc(fcdiag, BFA_STATUS_PBC);
+               return BFA_STATUS_PBC;
+       }
+
+       if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
+               bfa_trc(fcdiag, opmode);
+               return BFA_STATUS_PORT_NOT_DISABLED;
+       }
+
+       /*
+        * Check if input speed is supported by the port mode
+        */
+       if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+               if (!(speed == BFA_PORT_SPEED_1GBPS ||
+                     speed == BFA_PORT_SPEED_2GBPS ||
+                     speed == BFA_PORT_SPEED_4GBPS ||
+                     speed == BFA_PORT_SPEED_8GBPS ||
+                     speed == BFA_PORT_SPEED_16GBPS ||
+                     speed == BFA_PORT_SPEED_AUTO)) {
+                       bfa_trc(fcdiag, speed);
+                       return BFA_STATUS_UNSUPP_SPEED;
+               }
+               bfa_fcport_get_attr(bfa, &attr);
+               bfa_trc(fcdiag, attr.speed_supported);
+               if (speed > attr.speed_supported)
+                       return BFA_STATUS_UNSUPP_SPEED;
+       } else {
+               if (speed != BFA_PORT_SPEED_10GBPS) {
+                       bfa_trc(fcdiag, speed);
+                       return BFA_STATUS_UNSUPP_SPEED;
+               }
+       }
+
+       /* For Mezz cards, the port speed entered needs additional checking */
+       if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
+               if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+                       if ((speed == BFA_PORT_SPEED_1GBPS) &&
+                           (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+                               return BFA_STATUS_UNSUPP_SPEED;
+                       if (!(speed == BFA_PORT_SPEED_1GBPS ||
+                             speed == BFA_PORT_SPEED_2GBPS ||
+                             speed == BFA_PORT_SPEED_4GBPS ||
+                             speed == BFA_PORT_SPEED_8GBPS ||
+                             speed == BFA_PORT_SPEED_16GBPS ||
+                             speed == BFA_PORT_SPEED_AUTO))
+                               return BFA_STATUS_UNSUPP_SPEED;
+               } else {
+                       if (speed != BFA_PORT_SPEED_10GBPS)
+                               return BFA_STATUS_UNSUPP_SPEED;
+               }
+       }
+
+       /* check to see if there is another destructive diag cmd running */
+       if (fcdiag->lb.lock) {
+               bfa_trc(fcdiag, fcdiag->lb.lock);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       fcdiag->lb.lock = 1;
+       loopback.lb_mode = opmode;
+       loopback.speed = speed;
+       loopback.loopcnt = lpcnt;
+       loopback.pattern = pat;
+       fcdiag->lb.result = result;
+       fcdiag->lb.cbfn = cbfn;
+       fcdiag->lb.cbarg = cbarg;
+       memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
+       bfa_fcdiag_set_busy_status(fcdiag);
+
+       /* Send msg to fw */
+       status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
+       return status;
+}
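
A hypothetical caller of the loopback test, showing the contract implied above: the port must already be disabled, and completion arrives through the bfa_cb_diag_t callback with a bfa_status_t. my_lb_done(), example_run_loopback() and the numeric arguments are illustrative only:

	static struct bfa_diag_loopback_result_s lb_result;

	static void
	my_lb_done(void *cbarg, bfa_status_t status)
	{
		/* lb_result has been filled in by the completion path */
	}

	static bfa_status_t
	example_run_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode)
	{
		/* 8 Gbps, 1024 frames, a fixed data pattern (sample values) */
		return bfa_fcdiag_loopback(bfa, opmode, BFA_PORT_SPEED_8GBPS,
					   1024, 0xA5A5A5A5, &lb_result,
					   my_lb_done, NULL);
	}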
+
+/*
+ *     DIAG queue test command
+ *
+ *   @param[in] *bfa            - bfa data struct
+ *   @param[in] force           - 1: don't do ioc op checking
+ *   @param[in] queue           - queue no. to test
+ *   @param[in] *result         - pointer to bfa_diag_qtest_result_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] *cbarg          - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
+               struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
+               void *cbarg)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+       bfa_status_t status;
+       bfa_trc(fcdiag, force);
+       bfa_trc(fcdiag, queue);
+
+       if (!force && !bfa_iocfc_is_operational(bfa))
+               return BFA_STATUS_IOC_NON_OP;
+
+       /* check to see if there is another destructive diag cmd running */
+       if (fcdiag->qtest.lock) {
+               bfa_trc(fcdiag, fcdiag->qtest.lock);
+               return BFA_STATUS_DEVBUSY;
+       }
+
+       /* Initialization */
+       fcdiag->qtest.lock = 1;
+       fcdiag->qtest.cbfn = cbfn;
+       fcdiag->qtest.cbarg = cbarg;
+       fcdiag->qtest.result = result;
+       fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+
+       /* Init test results */
+       fcdiag->qtest.result->status = BFA_STATUS_OK;
+       fcdiag->qtest.result->count  = 0;
+
+       /* send */
+       if (queue < BFI_IOC_MAX_CQS) {
+               fcdiag->qtest.result->queue  = (u8)queue;
+               fcdiag->qtest.queue = (u8)queue;
+               fcdiag->qtest.all   = 0;
+       } else {
+               fcdiag->qtest.result->queue  = 0;
+               fcdiag->qtest.queue = 0;
+               fcdiag->qtest.all   = 1;
+       }
+       status = bfa_fcdiag_queuetest_send(fcdiag);
+
+       /* Start a timer */
+       if (status == BFA_STATUS_OK) {
+               bfa_timer_start(bfa, &fcdiag->qtest.timer,
+                               bfa_fcdiag_queuetest_timeout, fcdiag,
+                               BFA_DIAG_QTEST_TOV);
+               fcdiag->qtest.timer_active = 1;
+       }
+       return status;
+}
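
Passing a queue number of BFI_IOC_MAX_CQS or higher selects the "test all queues" mode, in which the completion handler advances through the CQs one by one. A sketch of a call site, reusing the callback shape from the loopback example; my_qtest_done() and qt_result are placeholders:

	static struct bfa_diag_qtest_result_s qt_result;

	static void
	my_qtest_done(void *cbarg, bfa_status_t status)
	{
		/* qt_result.queue/count/status describe the last queue tested */
	}

	static void
	example_run_qtest_all(struct bfa_s *bfa)
	{
		/* a queue number >= BFI_IOC_MAX_CQS walks every CQ in turn;
		 * pass 0..BFI_IOC_MAX_CQS-1 to exercise a single queue */
		bfa_fcdiag_queuetest(bfa, 0, BFI_IOC_MAX_CQS, &qt_result,
				     my_qtest_done, NULL);
	}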
 
+/*
+ * DIAG PLB is running
+ *
+ *   @param[in] *bfa    - bfa data struct
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
+{
+       struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+       return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
+}
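
bfa_fcdiag_lb_is_running() gives other code a cheap way to refuse operations that would collide with an in-flight loopback test; a hypothetical guard in a caller:

	/* sketch: bail out while a loopback test owns the port */
	if (bfa_fcdiag_lb_is_running(bfa) != BFA_STATUS_OK)
		return BFA_STATUS_DIAG_BUSY;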
index 5902a45c080fe8fc6fac75a2fa6b8122b460e5d3..95adb86d3769d477bdbf0b652aa704621eca665b 100644 (file)
@@ -26,6 +26,7 @@
  * Scatter-gather DMA related defines
  */
 #define BFA_SGPG_MIN   (16)
+#define BFA_SGPG_MAX   (8192)
 
 /*
  * Alignment macro for SG page allocation
@@ -54,17 +55,21 @@ struct bfa_sgpg_s {
  */
 #define BFA_SGPG_NPAGE(_nsges)  (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
 
+/* Max SGPG dma segs required */
+#define BFA_SGPG_DMA_SEGS      \
+       BFI_MEM_DMA_NSEGS(BFA_SGPG_MAX, (uint32_t)sizeof(struct bfi_sgpg_s))
+
 struct bfa_sgpg_mod_s {
        struct bfa_s *bfa;
        int             num_sgpgs;      /*  number of SG pages          */
        int             free_sgpgs;     /*  number of free SG pages     */
-       struct bfa_sgpg_s       *hsgpg_arr;     /*  BFA SG page array   */
-       struct bfi_sgpg_s *sgpg_arr;    /*  actual SG page array        */
-       u64     sgpg_arr_pa;    /*  SG page array DMA addr      */
        struct list_head        sgpg_q;         /*  queue of free SG pages */
        struct list_head        sgpg_wait_q;    /*  wait queue for SG pages */
+       struct bfa_mem_dma_s    dma_seg[BFA_SGPG_DMA_SEGS];
+       struct bfa_mem_kva_s    kva_seg;
 };
 #define BFA_SGPG_MOD(__bfa)    (&(__bfa)->modules.sgpg_mod)
+#define BFA_MEM_SGPG_KVA(__bfa) (&(BFA_SGPG_MOD(__bfa)->kva_seg))
 
 bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
                             int nsgpgs);
@@ -79,26 +84,32 @@ void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
  * FCXP related defines
  */
 #define BFA_FCXP_MIN           (1)
+#define BFA_FCXP_MAX           (256)
 #define BFA_FCXP_MAX_IBUF_SZ   (2 * 1024 + 256)
 #define BFA_FCXP_MAX_LBUF_SZ   (4 * 1024 + 256)
 
+/* Max FCXP dma segs required */
+#define BFA_FCXP_DMA_SEGS                                              \
+       BFI_MEM_DMA_NSEGS(BFA_FCXP_MAX,                                 \
+               (u32)BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ)
+
 struct bfa_fcxp_mod_s {
        struct bfa_s      *bfa;         /* backpointer to BFA */
        struct bfa_fcxp_s *fcxp_list;   /* array of FCXPs */
        u16     num_fcxps;      /* max num FCXP requests */
        struct list_head  fcxp_free_q;  /* free FCXPs */
        struct list_head  fcxp_active_q;        /* active FCXPs */
-       void            *req_pld_list_kva;      /* list of FCXP req pld */
-       u64     req_pld_list_pa;        /* list of FCXP req pld */
-       void            *rsp_pld_list_kva;      /* list of FCXP resp pld */
-       u64     rsp_pld_list_pa;        /* list of FCXP resp pld */
        struct list_head  wait_q;               /* wait queue for free fcxp */
+       struct list_head fcxp_unused_q; /* unused fcxps */
        u32     req_pld_sz;
        u32     rsp_pld_sz;
+       struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
+       struct bfa_mem_kva_s kva_seg;
 };
 
 #define BFA_FCXP_MOD(__bfa)            (&(__bfa)->modules.fcxp_mod)
 #define BFA_FCXP_FROM_TAG(__mod, __tag)        (&(__mod)->fcxp_list[__tag])
+#define BFA_MEM_FCXP_KVA(__bfa) (&(BFA_FCXP_MOD(__bfa)->kva_seg))
 
 typedef void    (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
                                   void *cb_arg, bfa_status_t req_status,
@@ -206,13 +217,15 @@ struct bfa_fcxp_wqe_s {
 #define BFA_FCXP_RSP_FCHS(_fcxp)       (&((_fcxp)->rsp_info.fchs))
 #define BFA_FCXP_RSP_PLD(_fcxp)                (bfa_fcxp_get_rspbuf(_fcxp))
 
-#define BFA_FCXP_REQ_PLD_PA(_fcxp)                             \
-       ((_fcxp)->fcxp_mod->req_pld_list_pa +                   \
-        ((_fcxp)->fcxp_mod->req_pld_sz  * (_fcxp)->fcxp_tag))
+#define BFA_FCXP_REQ_PLD_PA(_fcxp)                                           \
+       bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag,           \
+               (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz)
 
-#define BFA_FCXP_RSP_PLD_PA(_fcxp)                             \
-       ((_fcxp)->fcxp_mod->rsp_pld_list_pa +                   \
-        ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
+/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
+#define BFA_FCXP_RSP_PLD_PA(_fcxp)                                            \
+       (bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag,           \
+             (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) + \
+             (_fcxp)->fcxp_mod->req_pld_sz)
 
 void   bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 
@@ -238,10 +251,13 @@ struct bfa_rport_mod_s {
        struct bfa_rport_s *rps_list;   /*  list of rports      */
        struct list_head        rp_free_q;      /*  free bfa_rports     */
        struct list_head        rp_active_q;    /*  active bfa_rports   */
+       struct list_head        rp_unused_q;    /*  unused bfa rports  */
        u16     num_rports;     /*  number of rports    */
+       struct bfa_mem_kva_s    kva_seg;
 };
 
 #define BFA_RPORT_MOD(__bfa)   (&(__bfa)->modules.rport_mod)
+#define BFA_MEM_RPORT_KVA(__bfa) (&(BFA_RPORT_MOD(__bfa)->kva_seg))
 
 /*
  * Convert rport tag to RPORT
@@ -254,6 +270,7 @@ struct bfa_rport_mod_s {
  * protected functions
  */
 void   bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void   bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw);
 
 /*
  *     BFA rport information.
@@ -280,6 +297,7 @@ struct bfa_rport_s {
        void            *rport_drv;     /*  fcs/driver rport object         */
        u16     fw_handle;      /*  firmware rport handle           */
        u16     rport_tag;      /*  BFA rport tag                   */
+       u8      lun_mask;       /*  LUN mask flag                   */
        struct bfa_rport_info_s rport_info; /*  rport info from fcs/driver */
        struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq     */
        struct bfa_cb_qe_s hcb_qe;      /*  BFA callback qelem              */
@@ -298,7 +316,7 @@ struct bfa_rport_s {
  */
 
 #define BFA_UF_MIN     (4)
-
+#define BFA_UF_MAX     (256)
 
 struct bfa_uf_s {
        struct list_head        qe;     /*  queue element               */
@@ -326,36 +344,41 @@ struct bfa_uf_s {
  */
 typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
 
+#define BFA_UF_BUFSZ   (2 * 1024 + 256)
+
+struct bfa_uf_buf_s {
+       u8      d[BFA_UF_BUFSZ];
+};
+
+#define BFA_PER_UF_DMA_SZ      \
+       (u32)BFA_ROUNDUP(sizeof(struct bfa_uf_buf_s), BFA_DMA_ALIGN_SZ)
+
+/* Max UF dma segs required */
+#define BFA_UF_DMA_SEGS BFI_MEM_DMA_NSEGS(BFA_UF_MAX, BFA_PER_UF_DMA_SZ)
+
 struct bfa_uf_mod_s {
        struct bfa_s *bfa;              /*  back pointer to BFA */
        struct bfa_uf_s *uf_list;       /*  array of UFs */
        u16     num_ufs;        /*  num unsolicited rx frames */
        struct list_head        uf_free_q;      /*  free UFs */
        struct list_head        uf_posted_q;    /*  UFs posted to IOC */
-       struct bfa_uf_buf_s *uf_pbs_kva;        /*  list UF bufs request pld */
-       u64     uf_pbs_pa;      /*  phy addr for UF bufs */
+       struct list_head        uf_unused_q;    /*  unused UFs */
        struct bfi_uf_buf_post_s *uf_buf_posts;
        /*  pre-built UF post msgs */
        bfa_cb_uf_recv_t ufrecv;        /*  uf recv handler function */
        void            *cbarg;         /*  uf receive handler arg */
+       struct bfa_mem_dma_s    dma_seg[BFA_UF_DMA_SEGS];
+       struct bfa_mem_kva_s    kva_seg;
 };
 
 #define BFA_UF_MOD(__bfa)      (&(__bfa)->modules.uf_mod)
+#define BFA_MEM_UF_KVA(__bfa)  (&(BFA_UF_MOD(__bfa)->kva_seg))
 
 #define ufm_pbs_pa(_ufmod, _uftag)                                     \
-       ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
+       bfa_mem_get_dmabuf_pa(_ufmod, _uftag, BFA_PER_UF_DMA_SZ)
 
 void   bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-
-#define BFA_UF_BUFSZ   (2 * 1024 + 256)
-
-/*
- * @todo private
- */
-struct bfa_uf_buf_s {
-       u8              d[BFA_UF_BUFSZ];
-};
-
+void   bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
 
 /*
  * LPS - bfa lport login/logout service interface
@@ -364,7 +387,8 @@ struct bfa_lps_s {
        struct list_head        qe;     /*  queue element               */
        struct bfa_s    *bfa;           /*  parent bfa instance */
        bfa_sm_t        sm;             /*  finite state machine        */
-       u8              lp_tag;         /*  lport tag                   */
+       u8              bfa_tag;        /*  lport tag           */
+       u8              fw_tag;         /*  lport fw tag                */
        u8              reqq;           /*  lport request queue */
        u8              alpa;           /*  ALPA for loop topologies    */
        u32     lp_pid;         /*  lport port ID               */
@@ -377,8 +401,11 @@ struct bfa_lps_s {
        bfa_status_t    status;         /*  login status                */
        u16             pdusz;          /*  max receive PDU size        */
        u16             pr_bbcred;      /*  BB_CREDIT from peer         */
+       u8              pr_bbscn;       /*  BB_SCN from peer            */
+       u8              bb_scn;         /*  local BB_SCN                */
        u8              lsrjt_rsn;      /*  LSRJT reason                */
        u8              lsrjt_expl;     /*  LSRJT explanation           */
+       u8              lun_mask;       /*  LUN mask flag               */
        wwn_t           pwwn;           /*  port wwn of lport           */
        wwn_t           nwwn;           /*  node wwn of lport           */
        wwn_t           pr_pwwn;        /*  port wwn of lport peer      */
@@ -395,12 +422,15 @@ struct bfa_lps_s {
 struct bfa_lps_mod_s {
        struct list_head                lps_free_q;
        struct list_head                lps_active_q;
+       struct list_head                lps_login_q;
        struct bfa_lps_s        *lps_arr;
        int                     num_lps;
+       struct bfa_mem_kva_s    kva_seg;
 };
 
 #define BFA_LPS_MOD(__bfa)             (&(__bfa)->modules.lps_mod)
 #define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
+#define BFA_MEM_LPS_KVA(__bfa) (&(BFA_LPS_MOD(__bfa)->kva_seg))
 
 /*
  * external functions
@@ -413,7 +443,6 @@ void        bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
  */
 
 #define BFA_FCPORT(_bfa)       (&((_bfa)->modules.port))
-typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
 
 /*
  * Link notification data structure
@@ -467,21 +496,22 @@ struct bfa_fcport_s {
        u8                      *stats_kva;
        u64             stats_pa;
        union bfa_fcport_stats_u *stats;
-       union bfa_fcport_stats_u *stats_ret; /*  driver stats location */
        bfa_status_t            stats_status; /*  stats/statsclr status */
-       bfa_boolean_t           stats_busy; /*  outstanding stats/statsclr */
+       struct list_head        stats_pending_q;
+       struct list_head        statsclr_pending_q;
        bfa_boolean_t           stats_qfull;
        u32             stats_reset_time; /*  stats reset time stamp */
-       bfa_cb_port_t           stats_cbfn; /*  driver callback function */
-       void                    *stats_cbarg; /* *!< user callback arg */
        bfa_boolean_t           diag_busy; /*  diag busy status */
        bfa_boolean_t           beacon; /*  port beacon status */
        bfa_boolean_t           link_e2e_beacon; /*  link beacon status */
+       bfa_boolean_t           bbsc_op_state;  /* credit recovery operational state */
        struct bfa_fcport_trunk_s trunk;
        u16             fcoe_vlan;
+       struct bfa_mem_dma_s    fcport_dma;
 };
 
 #define BFA_FCPORT_MOD(__bfa)  (&(__bfa)->modules.fcport)
+#define BFA_MEM_FCPORT_DMA(__bfa) (&(BFA_FCPORT_MOD(__bfa)->fcport_dma))
 
 /*
  * protected functions
@@ -515,15 +545,19 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
 bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
 enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
 
-void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn);
 bfa_boolean_t     bfa_fcport_is_ratelim(struct bfa_s *bfa);
+void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+                       bfa_boolean_t link_e2e_beacon);
 bfa_boolean_t  bfa_fcport_is_linkup(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
-                                 union bfa_fcport_stats_u *stats,
-                                 bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
-                                   void *cbarg);
+                       struct bfa_cb_pending_q_s *cb);
+bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
+                       struct bfa_cb_pending_q_s *cb);
 bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
+void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
 
 /*
  * bfa rport API functions
@@ -541,6 +575,19 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
                               struct bfa_rport_qos_attr_s old_qos_attr,
                               struct bfa_rport_qos_attr_s new_qos_attr);
 
+/*
+ *     Rport LUN masking related
+ */
+#define BFA_RPORT_TAG_INVALID  0xffff
+#define BFA_LP_TAG_INVALID     0xff
+void   bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+void   bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+bfa_boolean_t  bfa_rport_lunmask_active(struct bfa_rport_s *rp);
+wwn_t  bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
+struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
+                                        wwn_t *lpwwn, wwn_t rpwwn);
+void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
+
 /*
  * bfa fcxp API functions
  */
@@ -577,6 +624,7 @@ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
 bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
 u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
 u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
+void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw);
 
 static inline void *
 bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
@@ -606,11 +654,12 @@ struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
 void bfa_lps_delete(struct bfa_lps_s *lps);
 void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
                   u16 pdusz, wwn_t pwwn, wwn_t nwwn,
-                  bfa_boolean_t auth_en);
+                  bfa_boolean_t auth_en, u8 bb_scn);
 void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
                   wwn_t pwwn, wwn_t nwwn);
 void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
 void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
+u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
 u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
 u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
 void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
@@ -618,4 +667,57 @@ void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
 
+/* FAA specific APIs */
+bfa_status_t bfa_faa_enable(struct bfa_s *bfa,
+                       bfa_cb_iocfc_t cbfn, void *cbarg);
+bfa_status_t bfa_faa_disable(struct bfa_s *bfa,
+                       bfa_cb_iocfc_t cbfn, void *cbarg);
+bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+                       bfa_cb_iocfc_t cbfn, void *cbarg);
+
+/*
+ *     FC DIAG data structure
+ */
+struct bfa_fcdiag_qtest_s {
+       struct bfa_diag_qtest_result_s *result;
+       bfa_cb_diag_t   cbfn;
+       void            *cbarg;
+       struct bfa_timer_s      timer;
+       u32     status;
+       u32     count;
+       u8      lock;
+       u8      queue;
+       u8      all;
+       u8      timer_active;
+};
+
+struct bfa_fcdiag_lb_s {
+       bfa_cb_diag_t   cbfn;
+       void            *cbarg;
+       void            *result;
+       bfa_boolean_t   lock;
+       u32        status;
+};
+
+struct bfa_fcdiag_s {
+       struct bfa_s    *bfa;           /* Back pointer to BFA */
+       struct bfa_trc_mod_s   *trcmod;
+       struct bfa_fcdiag_lb_s lb;
+       struct bfa_fcdiag_qtest_s qtest;
+};
+
+#define BFA_FCDIAG_MOD(__bfa)  (&(__bfa)->modules.fcdiag)
+
+void   bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+bfa_status_t   bfa_fcdiag_loopback(struct bfa_s *bfa,
+                               enum bfa_port_opmode opmode,
+                               enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+                               struct bfa_diag_loopback_result_s *result,
+                               bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
+                       u32 queue, struct bfa_diag_qtest_result_s *result,
+                       bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t   bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
+
 #endif /* __BFA_SVC_H__ */
index 59b5e9b61d71dc3385959a2dab5117af981b74e1..66fb72531b34caab0323797761d68ac2ab0e6fbc 100644 (file)
@@ -56,14 +56,15 @@ int         fdmi_enable = BFA_TRUE;
 int            pcie_max_read_reqsz;
 int            bfa_debugfs_enable = 1;
 int            msix_disable_cb = 0, msix_disable_ct = 0;
+int            max_xfer_size = BFAD_MAX_SECTORS >> 1;
 
 /* Firmware related */
-u32    bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
-u32     *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
+u32    bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
+u32    *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-#define BFAD_FW_FILE_CT_FC      "ctfw_fc.bin"
-#define BFAD_FW_FILE_CT_CNA     "ctfw_cna.bin"
-#define BFAD_FW_FILE_CB_FC      "cbfw_fc.bin"
+#define BFAD_FW_FILE_CB                "cbfw.bin"
+#define BFAD_FW_FILE_CT                "ctfw.bin"
+#define BFAD_FW_FILE_CT2       "ct2fw.bin"
 
 static u32 *bfad_load_fwimg(struct pci_dev *pdev);
 static void bfad_free_fwimg(void);
@@ -71,18 +72,18 @@ static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
                u32 *bfi_image_size, char *fw_name);
 
 static const char *msix_name_ct[] = {
+       "ctrl",
        "cpe0", "cpe1", "cpe2", "cpe3",
-       "rme0", "rme1", "rme2", "rme3",
-       "ctrl" };
+       "rme0", "rme1", "rme2", "rme3" };
 
 static const char *msix_name_cb[] = {
        "cpe0", "cpe1", "cpe2", "cpe3",
        "rme0", "rme1", "rme2", "rme3",
        "eemc", "elpu0", "elpu1", "epss", "mlpu" };
 
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
-MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
+MODULE_FIRMWARE(BFAD_FW_FILE_CB);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
 
 module_param(os_name, charp, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
@@ -144,6 +145,9 @@ MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
 module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
                " Range[false:0|true:1]");
+module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
+               " Range[64k|128k|256k|512k|1024k|2048k]");
 
 static void
 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
@@ -527,28 +531,26 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
 void
 bfad_hal_mem_release(struct bfad_s *bfad)
 {
-       int             i;
        struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
-       struct bfa_mem_elem_s *meminfo_elem;
-
-       for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
-               meminfo_elem = &hal_meminfo->meminfo[i];
-               if (meminfo_elem->kva != NULL) {
-                       switch (meminfo_elem->mem_type) {
-                       case BFA_MEM_TYPE_KVA:
-                               vfree(meminfo_elem->kva);
-                               break;
-                       case BFA_MEM_TYPE_DMA:
-                               dma_free_coherent(&bfad->pcidev->dev,
-                                       meminfo_elem->mem_len,
-                                       meminfo_elem->kva,
-                                       (dma_addr_t) meminfo_elem->dma);
-                               break;
-                       default:
-                               WARN_ON(1);
-                               break;
-                       }
-               }
+       struct bfa_mem_dma_s *dma_info, *dma_elem;
+       struct bfa_mem_kva_s *kva_info, *kva_elem;
+       struct list_head *dm_qe, *km_qe;
+
+       dma_info = &hal_meminfo->dma_info;
+       kva_info = &hal_meminfo->kva_info;
+
+       /* Iterate through the KVA meminfo queue */
+       list_for_each(km_qe, &kva_info->qe) {
+               kva_elem = (struct bfa_mem_kva_s *) km_qe;
+               vfree(kva_elem->kva);
+       }
+
+       /* Iterate through the DMA meminfo queue */
+       list_for_each(dm_qe, &dma_info->qe) {
+               dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+               dma_free_coherent(&bfad->pcidev->dev,
+                               dma_elem->mem_len, dma_elem->kva,
+                               (dma_addr_t) dma_elem->dma);
        }
 
        memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
@@ -563,15 +565,15 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
                bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
        if (num_tms > 0)
                bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
-       if (num_fcxps > 0)
+       if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
                bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
-       if (num_ufbufs > 0)
+       if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
                bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
        if (reqq_size > 0)
                bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
        if (rspq_size > 0)
                bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
-       if (num_sgpgs > 0)
+       if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
                bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
 
        /*
@@ -591,85 +593,46 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
 bfa_status_t
 bfad_hal_mem_alloc(struct bfad_s *bfad)
 {
-       int             i;
        struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
-       struct bfa_mem_elem_s *meminfo_elem;
-       dma_addr_t      phys_addr;
-       void           *kva;
+       struct bfa_mem_dma_s *dma_info, *dma_elem;
+       struct bfa_mem_kva_s *kva_info, *kva_elem;
+       struct list_head *dm_qe, *km_qe;
        bfa_status_t    rc = BFA_STATUS_OK;
-       int retry_count = 0;
-       int reset_value = 1;
-       int min_num_sgpgs = 512;
+       dma_addr_t      phys_addr;
 
        bfa_cfg_get_default(&bfad->ioc_cfg);
-
-retry:
        bfad_update_hal_cfg(&bfad->ioc_cfg);
        bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
-       bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);
-
-       for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
-               meminfo_elem = &hal_meminfo->meminfo[i];
-               switch (meminfo_elem->mem_type) {
-               case BFA_MEM_TYPE_KVA:
-                       kva = vmalloc(meminfo_elem->mem_len);
-                       if (kva == NULL) {
-                               bfad_hal_mem_release(bfad);
-                               rc = BFA_STATUS_ENOMEM;
-                               goto ext;
-                       }
-                       memset(kva, 0, meminfo_elem->mem_len);
-                       meminfo_elem->kva = kva;
-                       break;
-               case BFA_MEM_TYPE_DMA:
-                       kva = dma_alloc_coherent(&bfad->pcidev->dev,
-                               meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
-                       if (kva == NULL) {
-                               bfad_hal_mem_release(bfad);
-                               /*
-                                * If we cannot allocate with default
-                                * num_sgpages try with half the value.
-                                */
-                               if (num_sgpgs > min_num_sgpgs) {
-                                       printk(KERN_INFO
-                                       "bfad[%d]: memory allocation failed"
-                                       " with num_sgpgs: %d\n",
-                                               bfad->inst_no, num_sgpgs);
-                                       nextLowerInt(&num_sgpgs);
-                                       printk(KERN_INFO
-                                       "bfad[%d]: trying to allocate memory"
-                                       " with num_sgpgs: %d\n",
-                                               bfad->inst_no, num_sgpgs);
-                                       retry_count++;
-                                       goto retry;
-                               } else {
-                                       if (num_sgpgs_parm > 0)
-                                               num_sgpgs = num_sgpgs_parm;
-                                       else {
-                                               reset_value =
-                                                       (1 << retry_count);
-                                               num_sgpgs *= reset_value;
-                                       }
-                                       rc = BFA_STATUS_ENOMEM;
-                                       goto ext;
-                               }
-                       }
-
-                       if (num_sgpgs_parm > 0)
-                               num_sgpgs = num_sgpgs_parm;
-                       else {
-                               reset_value = (1 << retry_count);
-                               num_sgpgs *= reset_value;
-                       }
-
-                       memset(kva, 0, meminfo_elem->mem_len);
-                       meminfo_elem->kva = kva;
-                       meminfo_elem->dma = phys_addr;
-                       break;
-               default:
-                       break;
+       bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
+
+       dma_info = &hal_meminfo->dma_info;
+       kva_info = &hal_meminfo->kva_info;
+
+       /* Iterate through the KVA meminfo queue */
+       list_for_each(km_qe, &kva_info->qe) {
+               kva_elem = (struct bfa_mem_kva_s *) km_qe;
+               kva_elem->kva = vmalloc(kva_elem->mem_len);
+               if (kva_elem->kva == NULL) {
+                       bfad_hal_mem_release(bfad);
+                       rc = BFA_STATUS_ENOMEM;
+                       goto ext;
+               }
+               memset(kva_elem->kva, 0, kva_elem->mem_len);
+       }
 
+       /* Iterate through the DMA meminfo queue */
+       list_for_each(dm_qe, &dma_info->qe) {
+               dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+               dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
+                                               dma_elem->mem_len,
+                                               &phys_addr, GFP_KERNEL);
+               if (dma_elem->kva == NULL) {
+                       bfad_hal_mem_release(bfad);
+                       rc = BFA_STATUS_ENOMEM;
+                       goto ext;
                }
+               dma_elem->dma = phys_addr;
+               memset(dma_elem->kva, 0, dma_elem->mem_len);
        }
 ext:
        return rc;
@@ -780,13 +743,17 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
        pci_set_master(pdev);
 
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+       if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+           (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+                  (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
                        printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
                        goto out_release_region;
                }
+       }
 
        bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+       bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
 
        if (bfad->pci_bar0_kva == NULL) {
                printk(KERN_ERR "Fail to map bar0\n");
@@ -797,6 +764,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
        bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
        bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
        bfad->hal_pcidev.device_id = pdev->device;
+       bfad->hal_pcidev.ssid = pdev->subsystem_device;
        bfad->pci_name = pci_name(pdev);
 
        bfad->pci_attr.vendor_id = pdev->vendor;
@@ -868,6 +836,7 @@ void
 bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
 {
        pci_iounmap(pdev, bfad->pci_bar0_kva);
+       pci_iounmap(pdev, bfad->pci_bar2_kva);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
@@ -908,12 +877,29 @@ bfad_drv_init(struct bfad_s *bfad)
        bfad->bfa_fcs.trcmod = bfad->trcmod;
        bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
        bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
+       bfa_fcs_init(&bfad->bfa_fcs);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
        bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
 
+       /* configure base port */
+       rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+       if (rc != BFA_STATUS_OK)
+               goto out_cfg_pport_fail;
+
        return BFA_STATUS_OK;
 
+out_cfg_pport_fail:
+       /* fcs exit - on cfg pport failure */
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       init_completion(&bfad->comp);
+       bfad->pport.flags |= BFAD_PORT_DELETE;
+       bfa_fcs_exit(&bfad->bfa_fcs);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       wait_for_completion(&bfad->comp);
+       /* bfa detach - free hal memory */
+       bfa_detach(&bfad->bfa);
+       bfad_hal_mem_release(bfad);
 out_hal_mem_alloc_failure:
        return BFA_STATUS_FAILED;
 }
@@ -945,6 +931,7 @@ bfad_drv_start(struct bfad_s *bfad)
 
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_iocfc_start(&bfad->bfa);
+       bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
        bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
        bfad->bfad_flags |= BFAD_HAL_START_DONE;
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -1032,6 +1019,12 @@ bfad_start_ops(struct bfad_s *bfad) {
        struct bfad_vport_s *vport, *vport_new;
        struct bfa_fcs_driver_info_s driver_info;
 
+       /* Limit min/max xfer size to [64k-32MB] */
+       if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
+               max_xfer_size = BFAD_MIN_SECTORS >> 1;
+       if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
+               max_xfer_size = BFAD_MAX_SECTORS >> 1;
+
        /* Fill the driver_info info to fcs*/
        memset(&driver_info, 0, sizeof(driver_info));
        strncpy(driver_info.version, BFAD_DRIVER_VERSION,
@@ -1049,19 +1042,19 @@ bfad_start_ops(struct bfad_s *bfad) {
        strncpy(driver_info.os_device_name, bfad->pci_name,
                sizeof(driver_info.os_device_name) - 1);
 
-       /* FCS INIT */
+       /* FCS driver info init */
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
-       bfa_fcs_init(&bfad->bfa_fcs);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-       retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
-       if (retval != BFA_STATUS_OK) {
-               if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
-                       bfa_sm_set_state(bfad, bfad_sm_failed);
-               bfad_stop(bfad);
-               return BFA_STATUS_FAILED;
-       }
+       /*
+        * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
+        * with values learned during bfa_init firmware GETATTR REQ.
+        */
+       bfa_fcs_update_cfg(&bfad->bfa_fcs);
+
+       /* Setup fc host fixed attributes if the linux kernel supports them */
+       bfad_fc_host_init(bfad->pport.im_port);
 
        /* BFAD level FC4 IM specific resource allocation */
        retval = bfad_im_probe(bfad);
@@ -1233,8 +1226,8 @@ bfad_install_msix_handler(struct bfad_s *bfad)
        for (i = 0; i < bfad->nvec; i++) {
                sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
                                bfad->pci_name,
-                               ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
-                               msix_name_ct[i] : msix_name_cb[i]));
+                               ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
+                               msix_name_cb[i] : msix_name_ct[i]));
 
                error = request_irq(bfad->msix_tab[i].msix.vector,
                                    (irq_handler_t) bfad_msix, 0,
@@ -1248,6 +1241,9 @@ bfad_install_msix_handler(struct bfad_s *bfad)
                                free_irq(bfad->msix_tab[j].msix.vector,
                                                &bfad->msix_tab[j]);
 
+                       bfad->bfad_flags &= ~BFAD_MSIX_ON;
+                       pci_disable_msix(bfad->pcidev);
+
                        return 1;
                }
        }
@@ -1265,6 +1261,7 @@ bfad_setup_intr(struct bfad_s *bfad)
        u32 mask = 0, i, num_bit = 0, max_bit = 0;
        struct msix_entry msix_entries[MAX_MSIX_ENTRY];
        struct pci_dev *pdev = bfad->pcidev;
+       u16     reg;
 
        /* Call BFA to get the msix map for this PCI function.  */
        bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
@@ -1272,8 +1269,8 @@ bfad_setup_intr(struct bfad_s *bfad)
        /* Set up the msix entry table */
        bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
 
-       if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
-           (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
+       if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
+          (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
 
                error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
                if (error) {
@@ -1294,6 +1291,13 @@ bfad_setup_intr(struct bfad_s *bfad)
                        goto line_based;
                }
 
+               /* Disable INTX in MSI-X mode */
+               pci_read_config_word(pdev, PCI_COMMAND, &reg);
+
+               if (!(reg & PCI_COMMAND_INTX_DISABLE))
+                       pci_write_config_word(pdev, PCI_COMMAND,
+                               reg | PCI_COMMAND_INTX_DISABLE);
+
                /* Save the vectors */
                for (i = 0; i < bfad->nvec; i++) {
                        bfa_trc(bfad, msix_entries[i].vector);
@@ -1315,6 +1319,7 @@ line_based:
                /* Enable interrupt handler failed */
                return 1;
        }
+       bfad->bfad_flags |= BFAD_INTX_ON;
 
        return error;
 }
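The MSI-X hunks above also mask legacy INTx by setting PCI_COMMAND_INTX_DISABLE once the vectors are installed, and the new BFAD_INTX_ON flag lets bfad_remove_intr() free the line-based IRQ only when it was actually requested. A hedged sketch of the config-space step in isolation (later kernels would more likely call pci_intx()):

#include <linux/pci.h>

/* Mask legacy INTx assertion once MSI-X is in use: a read-modify-write
 * of the PCI command register, matching the hunk above. */
static void mask_legacy_intx(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INTX_DISABLE))
		pci_write_config_word(pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_INTX_DISABLE);
}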
@@ -1331,7 +1336,7 @@ bfad_remove_intr(struct bfad_s *bfad)
 
                pci_disable_msix(bfad->pcidev);
                bfad->bfad_flags &= ~BFAD_MSIX_ON;
-       } else {
+       } else if (bfad->bfad_flags & BFAD_INTX_ON) {
                free_irq(bfad->pcidev->irq, bfad);
        }
 }
@@ -1343,7 +1348,7 @@ int
 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
        struct bfad_s   *bfad;
-       int             error = -ENODEV, retval;
+       int             error = -ENODEV, retval, i;
 
        /* For single port cards - only claim function 0 */
        if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
@@ -1367,6 +1372,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
        bfa_trc_init(bfad->trcmod);
        bfa_trc(bfad, bfad_inst);
 
+       /* AEN INIT */
+       INIT_LIST_HEAD(&bfad->free_aen_q);
+       INIT_LIST_HEAD(&bfad->active_aen_q);
+       for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
+               list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
+
        if (!(bfad_load_fwimg(pdev))) {
                kfree(bfad->trcmod);
                goto out_alloc_trace_failure;
@@ -1501,6 +1512,14 @@ struct pci_device_id bfad_id_table[] = {
                .class = (PCI_CLASS_SERIAL_FIBER << 8),
                .class_mask = ~0,
        },
+       {
+               .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+               .device = BFA_PCI_DEVICE_ID_CT2,
+               .subvendor = PCI_ANY_ID,
+               .subdevice = PCI_ANY_ID,
+               .class = (PCI_CLASS_SERIAL_FIBER << 8),
+               .class_mask = ~0,
+       },
 
        {0, 0},
 };
@@ -1594,33 +1613,33 @@ out:
 static u32 *
 bfad_load_fwimg(struct pci_dev *pdev)
 {
-       if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
-               if (bfi_image_ct_fc_size == 0)
-                       bfad_read_firmware(pdev, &bfi_image_ct_fc,
-                               &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
-               return bfi_image_ct_fc;
-       } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
-               if (bfi_image_ct_cna_size == 0)
-                       bfad_read_firmware(pdev, &bfi_image_ct_cna,
-                               &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
-               return bfi_image_ct_cna;
+       if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
+               if (bfi_image_ct2_size == 0)
+                       bfad_read_firmware(pdev, &bfi_image_ct2,
+                               &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
+               return bfi_image_ct2;
+       } else if (bfa_asic_id_ct(pdev->device)) {
+               if (bfi_image_ct_size == 0)
+                       bfad_read_firmware(pdev, &bfi_image_ct,
+                               &bfi_image_ct_size, BFAD_FW_FILE_CT);
+               return bfi_image_ct;
        } else {
-               if (bfi_image_cb_fc_size == 0)
-                       bfad_read_firmware(pdev, &bfi_image_cb_fc,
-                               &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
-               return bfi_image_cb_fc;
+               if (bfi_image_cb_size == 0)
+                       bfad_read_firmware(pdev, &bfi_image_cb,
+                               &bfi_image_cb_size, BFAD_FW_FILE_CB);
+               return bfi_image_cb;
        }
 }
 
 static void
 bfad_free_fwimg(void)
 {
-       if (bfi_image_ct_fc_size && bfi_image_ct_fc)
-               vfree(bfi_image_ct_fc);
-       if (bfi_image_ct_cna_size && bfi_image_ct_cna)
-               vfree(bfi_image_ct_cna);
-       if (bfi_image_cb_fc_size && bfi_image_cb_fc)
-               vfree(bfi_image_cb_fc);
+       if (bfi_image_ct2_size && bfi_image_ct2)
+               vfree(bfi_image_ct2);
+       if (bfi_image_ct_size && bfi_image_ct)
+               vfree(bfi_image_ct);
+       if (bfi_image_cb_size && bfi_image_cb)
+               vfree(bfi_image_cb);
 }
 
 module_init(bfad_init);
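The reworked bfad_load_fwimg()/bfad_free_fwimg() above collapse the per-personality images (ct_fc/ct_cna/cb_fc) into one image per ASIC family (ct2/ct/cb), still loaded lazily on first use and held until bfad_free_fwimg() runs. A rough sketch of that load-once idiom around request_firmware(); the helper and variable names are illustrative, not the driver's:

#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static u32 *fw_image;		/* cached copy, one per ASIC family */
static u32  fw_image_size;

static u32 *load_fw_once(struct device *dev, const char *fw_name)
{
	const struct firmware *fw;

	if (fw_image_size)		/* already cached */
		return fw_image;

	if (request_firmware(&fw, fw_name, dev))
		return NULL;

	fw_image = vmalloc(fw->size);
	if (fw_image) {
		memcpy(fw_image, fw->data, fw->size);
		fw_image_size = fw->size;
	}
	release_firmware(fw);
	return fw_image;
}

static void free_fw_once(void)
{
	if (fw_image_size && fw_image)
		vfree(fw_image);
}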
index a94ea4235433ad9bec99768ff3d25f2bb3aaefb0..9d95844ab463ededc29b22e417d413e912b7cc48 100644 (file)
@@ -218,6 +218,9 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
        case BFA_PORT_SPEED_10GBPS:
                fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
                break;
+       case BFA_PORT_SPEED_16GBPS:
+               fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+               break;
        case BFA_PORT_SPEED_8GBPS:
                fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
                break;
@@ -580,6 +583,8 @@ struct fc_function_template bfad_im_fc_function_template = {
        .vport_create = bfad_im_vport_create,
        .vport_delete = bfad_im_vport_delete,
        .vport_disable = bfad_im_vport_disable,
+       .bsg_request = bfad_im_bsg_request,
+       .bsg_timeout = bfad_im_bsg_timeout,
 };
 
 struct fc_function_template bfad_im_vport_fc_function_template = {
@@ -674,8 +679,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
        struct bfad_s *bfad = im_port->bfad;
        char model[BFA_ADAPTER_MODEL_NAME_LEN];
        char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+       int nports = 0;
 
        bfa_get_adapter_model(&bfad->bfa, model);
+       nports = bfa_get_nports(&bfad->bfa);
        if (!strcmp(model, "Brocade-425"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                        "Brocade 4Gbps PCIe dual port FC HBA");
@@ -684,10 +691,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
                        "Brocade 8Gbps PCIe dual port FC HBA");
        else if (!strcmp(model, "Brocade-42B"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP 4Gbps PCIe dual port FC HBA");
+                       "Brocade 4Gbps PCIe dual port FC HBA for HP");
        else if (!strcmp(model, "Brocade-82B"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP 8Gbps PCIe dual port FC HBA");
+                       "Brocade 8Gbps PCIe dual port FC HBA for HP");
        else if (!strcmp(model, "Brocade-1010"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                        "Brocade 10Gbps single port CNA");
@@ -696,7 +703,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
                        "Brocade 10Gbps dual port CNA");
        else if (!strcmp(model, "Brocade-1007"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "Brocade 10Gbps CNA");
+                       "Brocade 10Gbps CNA for IBM Blade Center");
        else if (!strcmp(model, "Brocade-415"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                        "Brocade 4Gbps PCIe single port FC HBA");
@@ -705,17 +712,45 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
                        "Brocade 8Gbps PCIe single port FC HBA");
        else if (!strcmp(model, "Brocade-41B"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP 4Gbps PCIe single port FC HBA");
+                       "Brocade 4Gbps PCIe single port FC HBA for HP");
        else if (!strcmp(model, "Brocade-81B"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP 8Gbps PCIe single port FC HBA");
+                       "Brocade 8Gbps PCIe single port FC HBA for HP");
        else if (!strcmp(model, "Brocade-804"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "HP Bladesystem C-class 8Gbps FC HBA");
-       else if (!strcmp(model, "Brocade-902"))
+                       "Brocade 8Gbps FC HBA for HP Bladesystem C-class");
+       else if (!strcmp(model, "Brocade-902") ||
+                !strcmp(model, "Brocade-1741"))
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-                       "Brocade 10Gbps CNA");
-       else
+                       "Brocade 10Gbps CNA for Dell M-Series Blade Servers");
+       else if (strstr(model, "Brocade-1560")) {
+               if (nports == 1)
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe single port FC HBA");
+               else
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe dual port FC HBA");
+       } else if (strstr(model, "Brocade-1710")) {
+               if (nports == 1)
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 10Gbps single port CNA");
+               else
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 10Gbps dual port CNA");
+       } else if (strstr(model, "Brocade-1860")) {
+               if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 10Gbps single port CNA");
+               else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe single port FC HBA");
+               else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 10Gbps dual port CNA");
+               else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe dual port FC HBA");
+       } else
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                        "Invalid Model");
 
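The .bsg_request/.bsg_timeout hooks added to bfad_im_fc_function_template earlier are what route FC BSG requests from the SCSI transport into the new bfad_bsg.c below. As a sketch only, assuming the fc_bsg_job-based transport API of this kernel generation and a hypothetical handler name, such a hook dispatches on the message code and completes the job:

#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>

/* Hypothetical bsg_request hook; only vendor-unique messages are
 * claimed to be handled in this sketch. */
static int example_bsg_request(struct fc_bsg_job *job)
{
	int rc = -EINVAL;

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* decode job->request_payload and run the command */
		rc = 0;
		break;
	default:
		break;
	}

	job->reply->result = rc;
	job->job_done(job);
	return rc;
}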
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
new file mode 100644 (file)
index 0000000..06fc00c
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -0,0 +1,3235 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfad_bsg.h"
+
+BFA_TRC_FILE(LDRV, BSG);
+
+int
+bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       int     rc = 0;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       /* If IOC is not in disabled state - return */
+       if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_IOC_FAILURE;
+               return rc;
+       }
+
+       init_completion(&bfad->enable_comp);
+       bfa_iocfc_enable(&bfad->bfa);
+       iocmd->status = BFA_STATUS_OK;
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       wait_for_completion(&bfad->enable_comp);
+
+       return rc;
+}
+
+int
+bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       int     rc = 0;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (bfad->disable_active) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               return -EBUSY;
+       }
+
+       bfad->disable_active = BFA_TRUE;
+       init_completion(&bfad->disable_comp);
+       bfa_iocfc_disable(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       wait_for_completion(&bfad->disable_comp);
+       bfad->disable_active = BFA_FALSE;
+       iocmd->status = BFA_STATUS_OK;
+
+       return rc;
+}
+
+static int
+bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
+{
+       int     i;
+       struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
+       struct bfad_im_port_s   *im_port;
+       struct bfa_port_attr_s  pattr;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       bfa_fcport_get_attr(&bfad->bfa, &pattr);
+       iocmd->nwwn = pattr.nwwn;
+       iocmd->pwwn = pattr.pwwn;
+       iocmd->ioc_type = bfa_get_type(&bfad->bfa);
+       iocmd->mac = bfa_get_mac(&bfad->bfa);
+       iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
+       bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
+       iocmd->factorynwwn = pattr.factorynwwn;
+       iocmd->factorypwwn = pattr.factorypwwn;
+       iocmd->bfad_num = bfad->inst_no;
+       im_port = bfad->pport.im_port;
+       iocmd->host = im_port->shost->host_no;
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       strcpy(iocmd->name, bfad->adapter_name);
+       strcpy(iocmd->port_name, bfad->port_name);
+       strcpy(iocmd->hwpath, bfad->pci_name);
+
+       /* set adapter hw path */
+       strcpy(iocmd->adapter_hwpath, bfad->pci_name);
+       i = strlen(iocmd->adapter_hwpath) - 1;
+       while (iocmd->adapter_hwpath[i] != '.')
+               i--;
+       iocmd->adapter_hwpath[i] = '\0';
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+static int
+bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       /* fill in driver attr info */
+       strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
+       strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+               BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
+       strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
+               iocmd->ioc_attr.adapter_attr.fw_ver);
+       strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
+               iocmd->ioc_attr.adapter_attr.optrom_ver);
+
+       /* copy chip rev info first otherwise it will be overwritten */
+       memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
+               sizeof(bfad->pci_attr.chip_rev));
+       memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
+               sizeof(struct bfa_ioc_pci_attr_s));
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
+
+       bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_ioc_fwstats_s *iocmd =
+                       (struct bfa_bsg_ioc_fwstats_s *)cmd;
+       void    *iocmd_bufptr;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_ioc_fwstats_s),
+                       sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               goto out;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               goto out;
+       }
+out:
+       bfa_trc(bfad, 0x6666);
+       return 0;
+}
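Most of the larger handlers in this file follow the same shape as bfad_iocmd_ioc_get_fwstats() above: check that the user payload can hold the fixed command header plus the variable body (presumably what bfad_chk_iocmd_sz() verifies), then treat everything after the header as the data buffer. A stripped-down sketch of that layout check, with hypothetical parameter names:

#include <linux/types.h>

/* Illustrative only: the real check lives in bfad_chk_iocmd_sz(). */
static void *iocmd_body(void *iocmd, unsigned int payload_len,
			size_t hdr_len, size_t body_len)
{
	/* header + body must both fit in what userspace handed down */
	if (payload_len < hdr_len + body_len)
		return NULL;		/* caller reports VERSION_FAIL */

	return (char *)iocmd + hdr_len;	/* body starts right after header */
}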
+
+int
+bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long   flags;
+
+       if (v_cmd == IOCMD_IOC_RESET_STATS) {
+               bfa_ioc_clear_stats(&bfad->bfa);
+               iocmd->status = BFA_STATUS_OK;
+       } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
+               spin_lock_irqsave(&bfad->bfad_lock, flags);
+               iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       }
+
+       return 0;
+}
+
+int
+bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
+
+       if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
+               strcpy(bfad->adapter_name, iocmd->name);
+       else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
+               strcpy(bfad->port_name, iocmd->name);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
+
+       iocmd->status = BFA_STATUS_OK;
+       bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
+
+       return 0;
+}
+
+int
+bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
+                                       bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               return 0;
+       }
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+       return 0;
+}
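bfad_iocmd_port_enable() above shows the completion handshake used throughout this file for asynchronous firmware operations: take bfad_lock, start the operation with bfad_hcb_comp as the callback, drop the lock, then sleep on the completion and pick up the status the callback recorded. A generic sketch of that pattern, with a hypothetical start_async() standing in for the bfa_port_*/bfa_ablk_* calls:

#include <linux/completion.h>
#include <linux/spinlock.h>

struct hal_comp {			/* mirrors bfad_hal_comp */
	struct completion	comp;
	int			status;
};

/* Callback invoked by the lower layer when the operation finishes. */
static void hal_comp_done(void *arg, int status)
{
	struct hal_comp *c = arg;

	c->status = status;
	complete(&c->comp);
}

static int run_sync(spinlock_t *lock,
		    int (*start_async)(void (*cb)(void *, int), void *arg))
{
	struct hal_comp c;
	unsigned long flags;
	int rc;

	init_completion(&c.comp);

	spin_lock_irqsave(lock, flags);
	rc = start_async(hal_comp_done, &c);	/* queue the request */
	spin_unlock_irqrestore(lock, flags);
	if (rc)					/* failed to even start */
		return rc;

	wait_for_completion(&c.comp);		/* sleep until callback fires */
	return c.status;
}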
+
+int
+bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               return 0;
+       }
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+       return 0;
+}
+
+static int
+bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
+       struct bfa_lport_attr_s port_attr;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
+       bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
+               iocmd->attr.pid = port_attr.pid;
+       else
+               iocmd->attr.pid = 0;
+
+       iocmd->attr.port_type = port_attr.port_type;
+       iocmd->attr.loopback = port_attr.loopback;
+       iocmd->attr.authfail = port_attr.authfail;
+       strncpy(iocmd->attr.port_symname.symname,
+               port_attr.port_cfg.sym_name.symname,
+               sizeof(port_attr.port_cfg.sym_name.symname));
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       void    *iocmd_bufptr;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_port_stats_s),
+                       sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
+                               iocmd_bufptr, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               goto out;
+       }
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
+                                       bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               return 0;
+       }
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+       return 0;
+}
+
+int
+bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (v_cmd == IOCMD_PORT_CFG_TOPO)
+               cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
+       else if (v_cmd == IOCMD_PORT_CFG_SPEED)
+               cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
+       else if (v_cmd == IOCMD_PORT_CFG_ALPA)
+               cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
+       else if (v_cmd == IOCMD_PORT_CLR_ALPA)
+               cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
+                               (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+               if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
+                       fcport->cfg.bb_scn_state = BFA_TRUE;
+               else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
+                       fcport->cfg.bb_scn_state = BFA_FALSE;
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+static int
+bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_lport_s  *fcs_port;
+       struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_bsg_lport_stats_s *iocmd =
+                       (struct bfa_bsg_lport_stats_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_bsg_reset_stats_s *iocmd =
+                       (struct bfa_bsg_reset_stats_s *)cmd;
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+       struct list_head *qe, *qen;
+       struct bfa_itnim_s *itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->vpwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       bfa_fcs_lport_clear_stats(fcs_port);
+       /* clear IO stats from all active itnims */
+       list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+               itnim = (struct bfa_itnim_s *) qe;
+               if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
+                       continue;
+               bfa_itnim_clear_stats(itnim);
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_bsg_lport_iostats_s *iocmd =
+                       (struct bfa_bsg_lport_iostats_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
+                       fcs_port->lp_tag);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_lport_get_rports_s *iocmd =
+                       (struct bfa_bsg_lport_get_rports_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       unsigned long   flags;
+       void    *iocmd_bufptr;
+
+       if (iocmd->nrports == 0)
+               return -EINVAL;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_lport_get_rports_s),
+                       sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd +
+                       sizeof(struct bfa_bsg_lport_get_rports_s);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               bfa_trc(bfad, 0);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
+                               &iocmd->nrports);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_rport_s *fcs_rport;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+       if (fcs_rport == NULL) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               goto out;
+       }
+
+       bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+static int
+bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_rport_scsi_addr_s *iocmd =
+                       (struct bfa_bsg_rport_scsi_addr_s *)cmd;
+       struct bfa_fcs_lport_s  *fcs_port;
+       struct bfa_fcs_itnim_s  *fcs_itnim;
+       struct bfad_itnim_s     *drv_itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+       if (fcs_itnim == NULL) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               goto out;
+       }
+
+       drv_itnim = fcs_itnim->itnim_drv;
+
+       if (drv_itnim && drv_itnim->im_port)
+               iocmd->host = drv_itnim->im_port->shost->host_no;
+       else {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               goto out;
+       }
+
+       iocmd->target = drv_itnim->scsi_tgt_id;
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->bus = 0;
+       iocmd->lun = 0;
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_rport_stats_s *iocmd =
+                       (struct bfa_bsg_rport_stats_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_rport_s *fcs_rport;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+       if (fcs_rport == NULL) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               goto out;
+       }
+
+       memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
+               sizeof(struct bfa_rport_stats_s));
+       memcpy((void *)&iocmd->stats.hal_stats,
+              (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
+              sizeof(struct bfa_rport_hal_stats_s));
+
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_rport_reset_stats_s *iocmd =
+                               (struct bfa_bsg_rport_reset_stats_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_rport_s *fcs_rport;
+       struct bfa_rport_s *rport;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+       if (fcs_rport == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               goto out;
+       }
+
+       memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
+       rport = bfa_fcs_rport_get_halrport(fcs_rport);
+       memset(&rport->stats, 0, sizeof(rport->stats));
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_rport_set_speed_s *iocmd =
+                               (struct bfa_bsg_rport_set_speed_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_rport_s *fcs_rport;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (fcs_port == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               goto out;
+       }
+
+       fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+       if (fcs_rport == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               goto out;
+       }
+
+       fcs_rport->rpf.assigned_speed  = iocmd->speed;
+       /* Set this speed in f/w only if the RPSC speed is not available */
+       if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
+               bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_vport_s *fcs_vport;
+       struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->vpwwn);
+       if (fcs_vport == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+               goto out;
+       }
+
+       bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_vport_s *fcs_vport;
+       struct bfa_bsg_vport_stats_s *iocmd =
+                               (struct bfa_bsg_vport_stats_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->vpwwn);
+       if (fcs_vport == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+               goto out;
+       }
+
+       memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
+               sizeof(struct bfa_vport_stats_s));
+       memcpy((void *)&iocmd->vport_stats.port_stats,
+              (void *)&fcs_vport->lport.stats,
+               sizeof(struct bfa_lport_stats_s));
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_fcs_vport_s *fcs_vport;
+       struct bfa_bsg_reset_stats_s *iocmd =
+                               (struct bfa_bsg_reset_stats_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->vpwwn);
+       if (fcs_vport == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+               goto out;
+       }
+
+       memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+       memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+static int
+bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_fabric_get_lports_s *iocmd =
+                       (struct bfa_bsg_fabric_get_lports_s *)cmd;
+       bfa_fcs_vf_t    *fcs_vf;
+       uint32_t        nports = iocmd->nports;
+       unsigned long   flags;
+       void    *iocmd_bufptr;
+
+       if (nports == 0) {
+               iocmd->status = BFA_STATUS_EINVAL;
+               goto out;
+       }
+
+       if (bfad_chk_iocmd_sz(payload_len,
+               sizeof(struct bfa_bsg_fabric_get_lports_s),
+               sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               goto out;
+       }
+
+       iocmd_bufptr = (char *)iocmd +
+                       sizeof(struct bfa_bsg_fabric_get_lports_s);
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+       if (fcs_vf == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+               goto out;
+       }
+       bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->nports = nports;
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+       if (cmd == IOCMD_RATELIM_ENABLE)
+               fcport->cfg.ratelimit = BFA_TRUE;
+       else if (cmd == IOCMD_RATELIM_DISABLE)
+               fcport->cfg.ratelimit = BFA_FALSE;
+
+       if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
+               fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+
+       return 0;
+}
+
+int
+bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+       struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+       /* Auto and speeds greater than the supported speed are invalid */
+       if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
+           (iocmd->speed > fcport->speed_sup)) {
+               iocmd->status = BFA_STATUS_UNSUPP_SPEED;
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               return 0;
+       }
+
+       fcport->cfg.trl_def_speed = iocmd->speed;
+       iocmd->status = BFA_STATUS_OK;
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_fcpim_modstats_s *iocmd =
+                       (struct bfa_bsg_fcpim_modstats_s *)cmd;
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+       struct list_head *qe, *qen;
+       struct bfa_itnim_s *itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       /* accumulate IO stats from itnim */
+       memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+       list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+               itnim = (struct bfa_itnim_s *) qe;
+               bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
+                               (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+       struct list_head *qe, *qen;
+       struct bfa_itnim_s *itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+               itnim = (struct bfa_itnim_s *) qe;
+               bfa_itnim_clear_stats(itnim);
+       }
+       memset(&fcpim->del_itn_stats, 0,
+               sizeof(struct bfa_fcpim_del_itn_stats_s));
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
+                       (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
+       struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
+               sizeof(struct bfa_fcpim_del_itn_stats_s));
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
+       struct bfa_fcs_lport_s  *fcs_port;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->lpwwn);
+       if (!fcs_port)
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+       else
+               iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
+                                       iocmd->rpwwn, &iocmd->attr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_itnim_iostats_s *iocmd =
+                       (struct bfa_bsg_itnim_iostats_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_itnim_s *itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->lpwwn);
+       if (!fcs_port) {
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               bfa_trc(bfad, 0);
+       } else {
+               itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+               if (itnim == NULL)
+                       iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               else {
+                       iocmd->status = BFA_STATUS_OK;
+                       memcpy((void *)&iocmd->iostats, (void *)
+                              &(bfa_fcs_itnim_get_halitn(itnim)->stats),
+                              sizeof(struct bfa_itnim_iostats_s));
+               }
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+static int
+bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_rport_reset_stats_s *iocmd =
+                       (struct bfa_bsg_rport_reset_stats_s *)cmd;
+       struct bfa_fcs_lport_s  *fcs_port;
+       struct bfa_fcs_itnim_s  *itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->pwwn);
+       if (!fcs_port)
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+       else {
+               itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+               if (itnim == NULL)
+                       iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               else {
+                       iocmd->status = BFA_STATUS_OK;
+                       bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
+                       bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
+               }
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_itnim_itnstats_s *iocmd =
+                       (struct bfa_bsg_itnim_itnstats_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_itnim_s *itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->lpwwn);
+       if (!fcs_port) {
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+               bfa_trc(bfad, 0);
+       } else {
+               itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+               if (itnim == NULL)
+                       iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               else {
+                       iocmd->status = BFA_STATUS_OK;
+                       bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
+                                       &iocmd->itnstats);
+               }
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+int
+bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcport_enable(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcport_disable(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
+                               &iocmd->pcifn_cfg,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
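+/*
+ * Create a PCI function through the ablk module and wait for the
+ * completion callback to report the final status.
+ */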
+int
+bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
+                               &iocmd->pcifn_id, iocmd->port,
+                               iocmd->pcifn_class, iocmd->bandwidth,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
+                               iocmd->pcifn_id,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
+                               iocmd->pcifn_id, iocmd->bandwidth,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+       bfa_trc(bfad, iocmd->status);
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_adapter_cfg_mode_s *iocmd =
+                       (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
+                               iocmd->cfg.mode, iocmd->cfg.max_pf,
+                               iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_port_cfg_mode_s *iocmd =
+                       (struct bfa_bsg_port_cfg_mode_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags = 0;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
+                               iocmd->instance, iocmd->cfg.mode,
+                               iocmd->cfg.max_pf, iocmd->cfg.max_vf,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
+               iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
+                                       bfad_hcb_comp, &fcomp);
+       else
+               iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
+                                       bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long   flags;
+       struct bfad_hal_comp    fcomp;
+
+       init_completion(&fcomp.comp);
+       iocmd->status = BFA_STATUS_OK;
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long   flags;
+       struct bfad_hal_comp    fcomp;
+
+       init_completion(&fcomp.comp);
+       iocmd->status = BFA_STATUS_OK;
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
+       struct bfad_hal_comp    fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       iocmd->status = BFA_STATUS_OK;
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
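+/*
+ * Fetch the CEE attributes into the buffer that follows the bsg header.
+ * The payload size is validated first and bfad_mutex serializes CEE requests.
+ */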
+int
+bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+       struct bfa_bsg_cee_attr_s *iocmd =
+                               (struct bfa_bsg_cee_attr_s *)cmd;
+       void    *iocmd_bufptr;
+       struct bfad_hal_comp    cee_comp;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_cee_attr_s),
+                       sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
+
+       cee_comp.status = 0;
+       init_completion(&cee_comp.comp);
+       mutex_lock(&bfad_mutex);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
+                                        bfad_hcb_comp, &cee_comp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               mutex_unlock(&bfad_mutex);
+               bfa_trc(bfad, 0x5555);
+               goto out;
+       }
+       wait_for_completion(&cee_comp.comp);
+       mutex_unlock(&bfad_mutex);
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_cee_stats_s *iocmd =
+                               (struct bfa_bsg_cee_stats_s *)cmd;
+       void    *iocmd_bufptr;
+       struct bfad_hal_comp    cee_comp;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_cee_stats_s),
+                       sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
+
+       cee_comp.status = 0;
+       init_completion(&cee_comp.comp);
+       mutex_lock(&bfad_mutex);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
+                                       bfad_hcb_comp, &cee_comp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               mutex_unlock(&bfad_mutex);
+               bfa_trc(bfad, 0x5555);
+               goto out;
+       }
+       wait_for_completion(&cee_comp.comp);
+       mutex_unlock(&bfad_mutex);
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               bfa_trc(bfad, 0x5555);
+       return 0;
+}
+
+int
+bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
+       struct bfad_hal_comp    fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+       if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+               goto out;
+
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
+       struct bfad_hal_comp    fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+       if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_flash_attr_s *iocmd =
+                       (struct bfa_bsg_flash_attr_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+                               iocmd->instance, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+       void    *iocmd_bufptr;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_flash_s),
+                       iocmd->bufsz) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+                               iocmd->type, iocmd->instance, iocmd_bufptr,
+                               iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
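+/*
+ * Read a flash partition into the caller buffer appended to the bsg header,
+ * after validating the payload size.
+ */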
+int
+bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       void    *iocmd_bufptr;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_flash_s),
+                       iocmd->bufsz) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+                               iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
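+/* Query the temperature sensor via the diag module and wait for the result. */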
+int
+bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_get_temp_s *iocmd =
+                       (struct bfa_bsg_diag_get_temp_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
+                               &iocmd->result, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_memtest_s *iocmd =
+                       (struct bfa_bsg_diag_memtest_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
+                               &iocmd->memtest, iocmd->pat,
+                               &iocmd->result, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_loopback_s *iocmd =
+                       (struct bfa_bsg_diag_loopback_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
+                               iocmd->speed, iocmd->lpcnt, iocmd->pat,
+                               &iocmd->result, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_fwping_s *iocmd =
+                       (struct bfa_bsg_diag_fwping_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
+                               iocmd->pattern, &iocmd->result,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       bfa_trc(bfad, 0x77771);
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
+                               iocmd->queue, &iocmd->result,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_sfp_show_s *iocmd =
+                       (struct bfa_bsg_sfp_show_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+       bfa_trc(bfad, iocmd->status);
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
+                               &iocmd->ledtest);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+int
+bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_beacon_s *iocmd =
+                       (struct bfa_bsg_diag_beacon_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
+                               iocmd->beacon, iocmd->link_e2e_beacon,
+                               iocmd->second);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+int
+bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_diag_lb_stat_s *iocmd =
+                       (struct bfa_bsg_diag_lb_stat_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       bfa_trc(bfad, iocmd->status);
+
+       return 0;
+}
+
+int
+bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_phy_attr_s *iocmd =
+                       (struct bfa_bsg_phy_attr_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
+                               &iocmd->attr, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_phy_stats_s *iocmd =
+                       (struct bfa_bsg_phy_stats_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
+                               &iocmd->stats, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
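+/*
+ * Read PHY data (IOCMD_PHY_READ_FW) into the buffer appended to the bsg
+ * request, after validating the payload size.
+ */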
+int
+bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+       struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       void    *iocmd_bufptr;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_phy_s),
+                       iocmd->bufsz) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
+                               iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+                               0, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_vhba_attr_s *iocmd =
+                       (struct bfa_bsg_vhba_attr_s *)cmd;
+       struct bfa_vhba_attr_s *attr = &iocmd->attr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       attr->pwwn =  bfad->bfa.ioc.attr->pwwn;
+       attr->nwwn =  bfad->bfa.ioc.attr->nwwn;
+       attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
+       attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
+       attr->path_tov  = bfa_fcpim_path_tov_get(&bfad->bfa);
+       iocmd->status = BFA_STATUS_OK;
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+int
+bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+       struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+       void    *iocmd_bufptr;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len,
+                       sizeof(struct bfa_bsg_phy_s),
+                       iocmd->bufsz) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
+                               iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+                               0, bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
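+/*
+ * Copy the driver port log (struct bfa_plog_s) into the response buffer
+ * that follows the bsg header.
+ */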
+int
+bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+       void *iocmd_bufptr;
+
+       if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
+               bfa_trc(bfad, sizeof(struct bfa_plog_s));
+               iocmd->status = BFA_STATUS_EINVAL;
+               goto out;
+       }
+
+       iocmd->status = BFA_STATUS_OK;
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+       memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
+out:
+       return 0;
+}
+
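+/*
+ * The saved firmware core is read back in fixed-size chunks; the buffer
+ * length and offset must be suitably aligned.
+ */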
+#define BFA_DEBUG_FW_CORE_CHUNK_SZ     0x4000U /* 16K chunks for FW dump */
+int
+bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
+                       unsigned int payload_len)
+{
+       struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+       void    *iocmd_bufptr;
+       unsigned long   flags;
+
+       if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
+                       BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
+               iocmd->status = BFA_STATUS_VERSION_FAIL;
+               return 0;
+       }
+
+       if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
+                       !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
+                       !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
+               bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
+               iocmd->status = BFA_STATUS_EINVAL;
+               goto out;
+       }
+
+       iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
+                               (u32 *)&iocmd->offset, &iocmd->bufsz);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       unsigned long   flags;
+
+       if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
+               spin_lock_irqsave(&bfad->bfad_lock, flags);
+               bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
+               bfad->plog_buf.head = bfad->plog_buf.tail = 0;
+       else if (v_cmd == IOCMD_DEBUG_START_DTRC)
+               bfa_trc_init(bfad->trcmod);
+       else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
+               bfa_trc_stop(bfad->trcmod);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
+
+       if (iocmd->ctl == BFA_TRUE)
+               bfad->plog_buf.plog_enabled = 1;
+       else
+               bfad->plog_buf.plog_enabled = 0;
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_fcpim_profile_s *iocmd =
+                               (struct bfa_bsg_fcpim_profile_s *)cmd;
+       struct timeval  tv;
+       unsigned long   flags;
+
+       do_gettimeofday(&tv);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
+               iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+       else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
+               iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_itnim_ioprofile_s *iocmd =
+                               (struct bfa_bsg_itnim_ioprofile_s *)cmd;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_itnim_s *itnim;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+                               iocmd->vf_id, iocmd->lpwwn);
+       if (!fcs_port)
+               iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+       else {
+               itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+               if (itnim == NULL)
+                       iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+               else
+                       iocmd->status = bfa_itnim_get_ioprofile(
+                                               bfa_fcs_itnim_get_halitn(itnim),
+                                               &iocmd->ioprofile);
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+int
+bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_fcport_stats_s *iocmd =
+                               (struct bfa_bsg_fcport_stats_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+       struct bfa_cb_pending_q_s cb_qe;
+
+       init_completion(&fcomp.comp);
+       bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+                          &fcomp, &iocmd->stats);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               goto out;
+       }
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+       struct bfa_cb_pending_q_s cb_qe;
+
+       init_completion(&fcomp.comp);
+       bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               goto out;
+       }
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+                       BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+                       &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+                       bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+                       BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+                       &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+                       bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
+       struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
+       struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
+       pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
+       pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
+       memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
+       iocmd->status = BFA_STATUS_OK;
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return 0;
+}
+
+int
+bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+                               BFA_FLASH_PART_PXECFG,
+                               bfad->bfa.ioc.port_id, &iocmd->cfg,
+                               sizeof(struct bfa_ethboot_cfg_s), 0,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+                               BFA_FLASH_PART_PXECFG,
+                               bfad->bfa.ioc.port_id, &iocmd->cfg,
+                               sizeof(struct bfa_ethboot_cfg_s), 0,
+                               bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK)
+               goto out;
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
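+/*
+ * Enable or disable trunking on the base port by updating the fcport
+ * trunk configuration.
+ */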
+int
+bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+       if (v_cmd == IOCMD_TRUNK_ENABLE) {
+               trunk->attr.state = BFA_TRUNK_OFFLINE;
+               bfa_fcport_disable(&bfad->bfa);
+               fcport->cfg.trunked = BFA_TRUE;
+       } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
+               trunk->attr.state = BFA_TRUNK_DISABLED;
+               bfa_fcport_disable(&bfad->bfa);
+               fcport->cfg.trunked = BFA_FALSE;
+       }
+
+       if (!bfa_fcport_is_disabled(&bfad->bfa))
+               bfa_fcport_enable(&bfad->bfa);
+
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
+               sizeof(struct bfa_trunk_attr_s));
+       iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+               if (v_cmd == IOCMD_QOS_ENABLE)
+                       fcport->cfg.qos_enabled = BFA_TRUE;
+               else if (v_cmd == IOCMD_QOS_DISABLE)
+                       fcport->cfg.qos_enabled = BFA_FALSE;
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->attr.state = fcport->qos_attr.state;
+       iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_qos_vc_attr_s *iocmd =
+                               (struct bfa_bsg_qos_vc_attr_s *)cmd;
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
+       unsigned long   flags;
+       u32     i = 0;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+       iocmd->attr.shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
+       iocmd->attr.elp_opmode_flags  =
+                               be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
+
+       /* Individual VC info */
+       while (i < iocmd->attr.total_vc_count) {
+               iocmd->attr.vc_info[i].vc_credit =
+                               bfa_vc_attr->vc_info[i].vc_credit;
+               iocmd->attr.vc_info[i].borrow_credit =
+                               bfa_vc_attr->vc_info[i].borrow_credit;
+               iocmd->attr.vc_info[i].priority =
+                               bfa_vc_attr->vc_info[i].priority;
+               i++;
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       iocmd->status = BFA_STATUS_OK;
+       return 0;
+}
+
+int
+bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_fcport_stats_s *iocmd =
+                               (struct bfa_bsg_fcport_stats_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+       struct bfa_cb_pending_q_s cb_qe;
+
+       init_completion(&fcomp.comp);
+       bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+                          &fcomp, &iocmd->stats);
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+       iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               goto out;
+       }
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+       struct bfad_hal_comp fcomp;
+       unsigned long   flags;
+       struct bfa_cb_pending_q_s cb_qe;
+
+       init_completion(&fcomp.comp);
+       bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+                          &fcomp, NULL);
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+       iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (iocmd->status != BFA_STATUS_OK) {
+               bfa_trc(bfad, iocmd->status);
+               goto out;
+       }
+       wait_for_completion(&fcomp.comp);
+       iocmd->status = fcomp.status;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_vf_stats_s *iocmd =
+                       (struct bfa_bsg_vf_stats_s *)cmd;
+       struct bfa_fcs_fabric_s *fcs_vf;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+       if (fcs_vf == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+               goto out;
+       }
+       memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
+               sizeof(struct bfa_vf_stats_s));
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
+int
+bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_vf_reset_stats_s *iocmd =
+                       (struct bfa_bsg_vf_reset_stats_s *)cmd;
+       struct bfa_fcs_fabric_s *fcs_vf;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+       if (fcs_vf == NULL) {
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+               goto out;
+       }
+       memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       iocmd->status = BFA_STATUS_OK;
+out:
+       return 0;
+}
+
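+/* Enable, disable or clear the FCP-IM LUN mask configuration per v_cmd. */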
+int
+bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+               iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
+       else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+               iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
+       else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+               iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+int
+bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
+{
+       struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
+                       (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
+       struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+       struct bfa_bsg_fcpim_lunmask_s *iocmd =
+                               (struct bfa_bsg_fcpim_lunmask_s *)cmd;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
+               iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
+                                       &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
+       else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
+               iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
+                                       iocmd->vf_id, &iocmd->pwwn,
+                                       iocmd->rpwwn, iocmd->lun);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       return 0;
+}
+
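+/*
+ * Dispatch a vendor-specific bsg IOCMD to the matching handler above,
+ * based on the command code; unknown commands return -EINVAL.
+ */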
+static int
+bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
+               unsigned int payload_len)
+{
+       int rc = -EINVAL;
+
+       switch (cmd) {
+       case IOCMD_IOC_ENABLE:
+               rc = bfad_iocmd_ioc_enable(bfad, iocmd);
+               break;
+       case IOCMD_IOC_DISABLE:
+               rc = bfad_iocmd_ioc_disable(bfad, iocmd);
+               break;
+       case IOCMD_IOC_GET_INFO:
+               rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
+               break;
+       case IOCMD_IOC_GET_ATTR:
+               rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_IOC_GET_STATS:
+               rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_IOC_GET_FWSTATS:
+               rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_IOC_RESET_STATS:
+       case IOCMD_IOC_RESET_FWSTATS:
+               rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
+               break;
+       case IOCMD_IOC_SET_ADAPTER_NAME:
+       case IOCMD_IOC_SET_PORT_NAME:
+               rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
+               break;
+       case IOCMD_IOCFC_GET_ATTR:
+               rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_IOCFC_SET_INTR:
+               rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
+               break;
+       case IOCMD_PORT_ENABLE:
+               rc = bfad_iocmd_port_enable(bfad, iocmd);
+               break;
+       case IOCMD_PORT_DISABLE:
+               rc = bfad_iocmd_port_disable(bfad, iocmd);
+               break;
+       case IOCMD_PORT_GET_ATTR:
+               rc = bfad_iocmd_port_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_PORT_GET_STATS:
+               rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_PORT_RESET_STATS:
+               rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
+               break;
+       case IOCMD_PORT_CFG_TOPO:
+       case IOCMD_PORT_CFG_SPEED:
+       case IOCMD_PORT_CFG_ALPA:
+       case IOCMD_PORT_CLR_ALPA:
+               rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
+               break;
+       case IOCMD_PORT_CFG_MAXFRSZ:
+               rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
+               break;
+       case IOCMD_PORT_BBSC_ENABLE:
+       case IOCMD_PORT_BBSC_DISABLE:
+               rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
+               break;
+       case IOCMD_LPORT_GET_ATTR:
+               rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_LPORT_GET_STATS:
+               rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_LPORT_RESET_STATS:
+               rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
+               break;
+       case IOCMD_LPORT_GET_IOSTATS:
+               rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
+               break;
+       case IOCMD_LPORT_GET_RPORTS:
+               rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_RPORT_GET_ATTR:
+               rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_RPORT_GET_ADDR:
+               rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
+               break;
+       case IOCMD_RPORT_GET_STATS:
+               rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_RPORT_RESET_STATS:
+               rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
+               break;
+       case IOCMD_RPORT_SET_SPEED:
+               rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
+               break;
+       case IOCMD_VPORT_GET_ATTR:
+               rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_VPORT_GET_STATS:
+               rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_VPORT_RESET_STATS:
+               rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
+               break;
+       case IOCMD_FABRIC_GET_LPORTS:
+               rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_RATELIM_ENABLE:
+       case IOCMD_RATELIM_DISABLE:
+               rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
+               break;
+       case IOCMD_RATELIM_DEF_SPEED:
+               rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
+               break;
+       case IOCMD_FCPIM_FAILOVER:
+               rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
+               break;
+       case IOCMD_FCPIM_MODSTATS:
+               rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
+               break;
+       case IOCMD_FCPIM_MODSTATSCLR:
+               rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
+               break;
+       case IOCMD_FCPIM_DEL_ITN_STATS:
+               rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
+               break;
+       case IOCMD_ITNIM_GET_ATTR:
+               rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_ITNIM_GET_IOSTATS:
+               rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
+               break;
+       case IOCMD_ITNIM_RESET_STATS:
+               rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
+               break;
+       case IOCMD_ITNIM_GET_ITNSTATS:
+               rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
+               break;
+       case IOCMD_FCPORT_ENABLE:
+               rc = bfad_iocmd_fcport_enable(bfad, iocmd);
+               break;
+       case IOCMD_FCPORT_DISABLE:
+               rc = bfad_iocmd_fcport_disable(bfad, iocmd);
+               break;
+       case IOCMD_IOC_PCIFN_CFG:
+               rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
+               break;
+       case IOCMD_PCIFN_CREATE:
+               rc = bfad_iocmd_pcifn_create(bfad, iocmd);
+               break;
+       case IOCMD_PCIFN_DELETE:
+               rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
+               break;
+       case IOCMD_PCIFN_BW:
+               rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
+               break;
+       case IOCMD_ADAPTER_CFG_MODE:
+               rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
+               break;
+       case IOCMD_PORT_CFG_MODE:
+               rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
+               break;
+       case IOCMD_FLASH_ENABLE_OPTROM:
+       case IOCMD_FLASH_DISABLE_OPTROM:
+               rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
+               break;
+       case IOCMD_FAA_ENABLE:
+               rc = bfad_iocmd_faa_enable(bfad, iocmd);
+               break;
+       case IOCMD_FAA_DISABLE:
+               rc = bfad_iocmd_faa_disable(bfad, iocmd);
+               break;
+       case IOCMD_FAA_QUERY:
+               rc = bfad_iocmd_faa_query(bfad, iocmd);
+               break;
+       case IOCMD_CEE_GET_ATTR:
+               rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_CEE_GET_STATS:
+               rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_CEE_RESET_STATS:
+               rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
+               break;
+       case IOCMD_SFP_MEDIA:
+               rc = bfad_iocmd_sfp_media(bfad, iocmd);
+               break;
+       case IOCMD_SFP_SPEED:
+               rc = bfad_iocmd_sfp_speed(bfad, iocmd);
+               break;
+       case IOCMD_FLASH_GET_ATTR:
+               rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_FLASH_ERASE_PART:
+               rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
+               break;
+       case IOCMD_FLASH_UPDATE_PART:
+               rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_FLASH_READ_PART:
+               rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_DIAG_TEMP:
+               rc = bfad_iocmd_diag_temp(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_MEMTEST:
+               rc = bfad_iocmd_diag_memtest(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_LOOPBACK:
+               rc = bfad_iocmd_diag_loopback(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_FWPING:
+               rc = bfad_iocmd_diag_fwping(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_QUEUETEST:
+               rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_SFP:
+               rc = bfad_iocmd_diag_sfp(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_LED:
+               rc = bfad_iocmd_diag_led(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_BEACON_LPORT:
+               rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
+               break;
+       case IOCMD_DIAG_LB_STAT:
+               rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
+               break;
+       case IOCMD_PHY_GET_ATTR:
+               rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_PHY_GET_STATS:
+               rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_PHY_UPDATE_FW:
+               rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_PHY_READ_FW:
+               rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_VHBA_QUERY:
+               rc = bfad_iocmd_vhba_query(bfad, iocmd);
+               break;
+       case IOCMD_DEBUG_PORTLOG:
+               rc = bfad_iocmd_porglog_get(bfad, iocmd);
+               break;
+       case IOCMD_DEBUG_FW_CORE:
+               rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
+               break;
+       case IOCMD_DEBUG_FW_STATE_CLR:
+       case IOCMD_DEBUG_PORTLOG_CLR:
+       case IOCMD_DEBUG_START_DTRC:
+       case IOCMD_DEBUG_STOP_DTRC:
+               rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
+               break;
+       case IOCMD_DEBUG_PORTLOG_CTL:
+               rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
+               break;
+       case IOCMD_FCPIM_PROFILE_ON:
+       case IOCMD_FCPIM_PROFILE_OFF:
+               rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
+               break;
+       case IOCMD_ITNIM_GET_IOPROFILE:
+               rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
+               break;
+       case IOCMD_FCPORT_GET_STATS:
+               rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_FCPORT_RESET_STATS:
+               rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
+               break;
+       case IOCMD_BOOT_CFG:
+               rc = bfad_iocmd_boot_cfg(bfad, iocmd);
+               break;
+       case IOCMD_BOOT_QUERY:
+               rc = bfad_iocmd_boot_query(bfad, iocmd);
+               break;
+       case IOCMD_PREBOOT_QUERY:
+               rc = bfad_iocmd_preboot_query(bfad, iocmd);
+               break;
+       case IOCMD_ETHBOOT_CFG:
+               rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
+               break;
+       case IOCMD_ETHBOOT_QUERY:
+               rc = bfad_iocmd_ethboot_query(bfad, iocmd);
+               break;
+       case IOCMD_TRUNK_ENABLE:
+       case IOCMD_TRUNK_DISABLE:
+               rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
+               break;
+       case IOCMD_TRUNK_GET_ATTR:
+               rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_QOS_ENABLE:
+       case IOCMD_QOS_DISABLE:
+               rc = bfad_iocmd_qos(bfad, iocmd, cmd);
+               break;
+       case IOCMD_QOS_GET_ATTR:
+               rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
+               break;
+       case IOCMD_QOS_GET_VC_ATTR:
+               rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
+               break;
+       case IOCMD_QOS_GET_STATS:
+               rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_QOS_RESET_STATS:
+               rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
+               break;
+       case IOCMD_VF_GET_STATS:
+               rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
+               break;
+       case IOCMD_VF_RESET_STATS:
+               rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
+               break;
+       case IOCMD_FCPIM_LUNMASK_ENABLE:
+       case IOCMD_FCPIM_LUNMASK_DISABLE:
+       case IOCMD_FCPIM_LUNMASK_CLEAR:
+               rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
+               break;
+       case IOCMD_FCPIM_LUNMASK_QUERY:
+               rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
+               break;
+       case IOCMD_FCPIM_LUNMASK_ADD:
+       case IOCMD_FCPIM_LUNMASK_DELETE:
+               rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
+               break;
+       default:
+               rc = -EINVAL;
+               break;
+       }
+       return rc;
+}
+
+static int
+bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+{
+       uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
+       struct bfad_im_port_s *im_port =
+                       (struct bfad_im_port_s *) job->shost->hostdata[0];
+       struct bfad_s *bfad = im_port->bfad;
+       void *payload_kbuf;
+       int rc = -EINVAL;
+
+       /* Allocate a temp buffer to hold the passed in user space command */
+       payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+       if (!payload_kbuf) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
+       sg_copy_to_buffer(job->request_payload.sg_list,
+                         job->request_payload.sg_cnt, payload_kbuf,
+                         job->request_payload.payload_len);
+
+       /* Invoke IOCMD handler - to handle all the vendor command requests */
+       rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
+                               job->request_payload.payload_len);
+       if (rc != BFA_STATUS_OK)
+               goto error;
+
+       /* Copy the response data to the job->reply_payload sg_list */
+       sg_copy_from_buffer(job->reply_payload.sg_list,
+                           job->reply_payload.sg_cnt,
+                           payload_kbuf,
+                           job->reply_payload.payload_len);
+
+       /* free the command buffer */
+       kfree(payload_kbuf);
+
+       /* Fill the BSG job reply data */
+       job->reply_len = job->reply_payload.payload_len;
+       job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+       job->reply->result = rc;
+
+       job->job_done(job);
+       return rc;
+error:
+       /* free the command buffer */
+       kfree(payload_kbuf);
+out:
+       job->reply->result = rc;
+       job->reply_len = sizeof(uint32_t);
+       job->reply->reply_payload_rcv_len = 0;
+       return rc;
+}
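
For context, the vendor-command path above is driven from user space through the FC transport's bsg node. Below is a minimal, hedged sketch of such a caller; the uapi header paths, the /dev/bsg/fc_hostN device name, and the Brocade PCI vendor id 0x1657 are assumptions and not taken from this patch, while the IOCMD code and payload struct come from bfad_bsg.h added later in this commit.

/* Hypothetical user-space caller for the IOCMD_IOC_GET_ATTR vendor command
 * (device name and vendor id are assumptions, not taken from this patch).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>          /* struct sg_io_v4, BSG_PROTOCOL_SCSI */
#include <scsi/sg.h>            /* SG_IO */
#include <scsi/scsi_bsg_fc.h>   /* struct fc_bsg_request, FC_BSG_HST_VENDOR */

#define IOCMD_IOC_GET_ATTR      0x3     /* from the enum in bfad_bsg.h below */

int main(int argc, char **argv)
{
        /* fc_bsg_request followed by one 32-bit vendor command code */
        unsigned char rqst[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
        struct fc_bsg_request *bsg_rqst = (struct fc_bsg_request *)rqst;
        unsigned char payload[4096];    /* struct bfa_bsg_ioc_attr_s lives here */
        struct fc_bsg_reply reply;
        struct sg_io_v4 io;
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDWR);     /* e.g. /dev/bsg/fc_host0 (assumed) */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(rqst, 0, sizeof(rqst));
        memset(payload, 0, sizeof(payload));
        bsg_rqst->msgcode = FC_BSG_HST_VENDOR;
        bsg_rqst->rqst_data.h_vendor.vendor_id = 0x1657;        /* Brocade (assumed) */
        bsg_rqst->rqst_data.h_vendor.vendor_cmd[0] = IOCMD_IOC_GET_ATTR;

        memset(&io, 0, sizeof(io));
        io.guard = 'Q';
        io.protocol = BSG_PROTOCOL_SCSI;
        io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
        io.request = (uintptr_t)rqst;
        io.request_len = sizeof(rqst);
        io.response = (uintptr_t)&reply;
        io.max_response_len = sizeof(reply);
        io.dout_xferp = (uintptr_t)payload;     /* request_payload: iocmd header in */
        io.dout_xfer_len = sizeof(payload);
        io.din_xferp = (uintptr_t)payload;      /* reply_payload: attributes out */
        io.din_xfer_len = sizeof(payload);

        if (ioctl(fd, SG_IO, &io) < 0)
                perror("SG_IO");
        close(fd);
        return 0;
}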
+
+/* FC passthru call backs */
+u64
+bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+       struct bfad_fcxp        *drv_fcxp = bfad_fcxp;
+       struct bfa_sge_s  *sge;
+       u64     addr;
+
+       sge = drv_fcxp->req_sge + sgeid;
+       addr = (u64)(size_t) sge->sg_addr;
+       return addr;
+}
+
+u32
+bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+       struct bfad_fcxp        *drv_fcxp = bfad_fcxp;
+       struct bfa_sge_s        *sge;
+
+       sge = drv_fcxp->req_sge + sgeid;
+       return sge->sg_len;
+}
+
+u64
+bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+       struct bfad_fcxp        *drv_fcxp = bfad_fcxp;
+       struct bfa_sge_s        *sge;
+       u64     addr;
+
+       sge = drv_fcxp->rsp_sge + sgeid;
+       addr = (u64)(size_t) sge->sg_addr;
+       return addr;
+}
+
+u32
+bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+       struct bfad_fcxp        *drv_fcxp = bfad_fcxp;
+       struct bfa_sge_s        *sge;
+
+       sge = drv_fcxp->rsp_sge + sgeid;
+       return sge->sg_len;
+}
+
+void
+bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+               bfa_status_t req_status, u32 rsp_len, u32 resid_len,
+               struct fchs_s *rsp_fchs)
+{
+       struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+       drv_fcxp->req_status = req_status;
+       drv_fcxp->rsp_len = rsp_len;
+
+       /* bfa_fcxp will be automatically freed by BFA */
+       drv_fcxp->bfa_fcxp = NULL;
+       complete(&drv_fcxp->comp);
+}
+
+struct bfad_buf_info *
+bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
+                uint32_t payload_len, uint32_t *num_sgles)
+{
+       struct bfad_buf_info    *buf_base, *buf_info;
+       struct bfa_sge_s        *sg_table;
+       int sge_num = 1;
+
+       buf_base = kzalloc((sizeof(struct bfad_buf_info) +
+                          sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
+       if (!buf_base)
+               return NULL;
+
+       sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
+                       (sizeof(struct bfad_buf_info) * sge_num));
+
+       /* Allocate dma coherent memory */
+       buf_info = buf_base;
+       buf_info->size = payload_len;
+       buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
+                                       &buf_info->phys, GFP_KERNEL);
+       if (!buf_info->virt)
+               goto out_free_mem;
+
+       /* copy the linear bsg buffer to buf_info */
+       memset(buf_info->virt, 0, buf_info->size);
+       memcpy(buf_info->virt, payload_kbuf, buf_info->size);
+
+       /*
+        * Setup SG table
+        */
+       sg_table->sg_len = buf_info->size;
+       sg_table->sg_addr = (void *)(size_t) buf_info->phys;
+
+       *num_sgles = sge_num;
+
+       return buf_base;
+
+out_free_mem:
+       kfree(buf_base);
+       return NULL;
+}
+
+void
+bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
+                  uint32_t num_sgles)
+{
+       int i;
+       struct bfad_buf_info *buf_info = buf_base;
+
+       if (buf_base) {
+               for (i = 0; i < num_sgles; buf_info++, i++) {
+                       if (buf_info->virt != NULL)
+                               dma_free_coherent(&bfad->pcidev->dev,
+                                       buf_info->size, buf_info->virt,
+                                       buf_info->phys);
+               }
+               kfree(buf_base);
+       }
+}
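
A note on the layout behind the pointer arithmetic in bfad_fcxp_map_sg() above and in bfad_im_bsg_els_ct_request() below: the function returns a single allocation holding the bfad_buf_info array immediately followed by the bfa_sge_s table.

/* buf_base layout for num_sgles == n (n is 1 in this version):
 *
 *   buf_base
 *   |
 *   v
 *   +---------------------------+----------------------+
 *   | struct bfad_buf_info [n]  | struct bfa_sge_s [n] |
 *   +---------------------------+----------------------+
 *                               ^
 *                               req_sge / rsp_sge =
 *                               buf_base + n * sizeof(struct bfad_buf_info)
 */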
+
+int
+bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+                  bfa_bsg_fcpt_t *bsg_fcpt)
+{
+       struct bfa_fcxp_s *hal_fcxp;
+       struct bfad_s   *bfad = drv_fcxp->port->bfad;
+       unsigned long   flags;
+       uint8_t lp_tag;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+       /* Allocate bfa_fcxp structure */
+       hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
+                                 drv_fcxp->num_req_sgles,
+                                 drv_fcxp->num_rsp_sgles,
+                                 bfad_fcxp_get_req_sgaddr_cb,
+                                 bfad_fcxp_get_req_sglen_cb,
+                                 bfad_fcxp_get_rsp_sgaddr_cb,
+                                 bfad_fcxp_get_rsp_sglen_cb);
+       if (!hal_fcxp) {
+               bfa_trc(bfad, 0);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               return BFA_STATUS_ENOMEM;
+       }
+
+       drv_fcxp->bfa_fcxp = hal_fcxp;
+
+       lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
+
+       bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
+                     bsg_fcpt->cts, bsg_fcpt->cos,
+                     job->request_payload.payload_len,
+                     &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
+                     job->reply_payload.payload_len, bsg_fcpt->tsecs);
+
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       return BFA_STATUS_OK;
+}
+
+int
+bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+{
+       struct bfa_bsg_data *bsg_data;
+       struct bfad_im_port_s *im_port =
+                       (struct bfad_im_port_s *) job->shost->hostdata[0];
+       struct bfad_s *bfad = im_port->bfad;
+       bfa_bsg_fcpt_t *bsg_fcpt;
+       struct bfad_fcxp    *drv_fcxp;
+       struct bfa_fcs_lport_s *fcs_port;
+       struct bfa_fcs_rport_s *fcs_rport;
+       uint32_t command_type = job->request->msgcode;
+       unsigned long flags;
+       struct bfad_buf_info *rsp_buf_info;
+       void *req_kbuf = NULL, *rsp_kbuf = NULL;
+       int rc = -EINVAL;
+
+       job->reply_len  = sizeof(uint32_t);     /* At least uint32_t reply_len */
+       job->reply->reply_payload_rcv_len = 0;
+
+       /* Get the payload passed in from userspace */
+       bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
+                                       sizeof(struct fc_bsg_request));
+       if (bsg_data == NULL)
+               goto out;
+
+       /*
+        * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
+        * buffer of size bsg_data->payload_len
+        */
+       bsg_fcpt = (struct bfa_bsg_fcpt_s *)
+                  kzalloc(bsg_data->payload_len, GFP_KERNEL);
+       if (!bsg_fcpt)
+               goto out;
+
+       if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
+                               bsg_data->payload_len)) {
+               kfree(bsg_fcpt);
+               goto out;
+       }
+
+       drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
+       if (drv_fcxp == NULL) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
+                                       bsg_fcpt->lpwwn);
+       if (fcs_port == NULL) {
+               bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               goto out_free_mem;
+       }
+
+       /* Check if the port is online before sending FC Passthru cmd */
+       if (!bfa_fcs_lport_is_online(fcs_port)) {
+               bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               goto out_free_mem;
+       }
+
+       drv_fcxp->port = fcs_port->bfad_port;
+
+       if (drv_fcxp->port->bfad == 0)
+               drv_fcxp->port->bfad = bfad;
+
+       /* Fetch the bfa_rport - if nexus needed */
+       if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
+           command_type == FC_BSG_HST_CT) {
+               /* BSG HST commands: no nexus needed */
+               drv_fcxp->bfa_rport = NULL;
+
+       } else if (command_type == FC_BSG_RPT_ELS ||
+                  command_type == FC_BSG_RPT_CT) {
+               /* BSG RPT commands: nexus needed */
+               fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
+                                                           bsg_fcpt->dpwwn);
+               if (fcs_rport == NULL) {
+                       bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
+                       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+                       goto out_free_mem;
+               }
+
+               drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
+
+       } else { /* Unknown BSG msgcode; return -EINVAL */
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               goto out_free_mem;
+       }
+
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       /* allocate memory for req / rsp buffers */
+       req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+       if (!req_kbuf) {
+               printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
+                               bfad->pci_name);
+               rc = -ENOMEM;
+               goto out_free_mem;
+       }
+
+       rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
+       if (!rsp_kbuf) {
+               printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
+                               bfad->pci_name);
+               rc = -ENOMEM;
+               goto out_free_mem;
+       }
+
+       /* map req sg - copy the sg_list passed in to the linear buffer */
+       sg_copy_to_buffer(job->request_payload.sg_list,
+                         job->request_payload.sg_cnt, req_kbuf,
+                         job->request_payload.payload_len);
+
+       drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
+                                       job->request_payload.payload_len,
+                                       &drv_fcxp->num_req_sgles);
+       if (!drv_fcxp->reqbuf_info) {
+               printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
+                               bfad->pci_name);
+               rc = -ENOMEM;
+               goto out_free_mem;
+       }
+
+       drv_fcxp->req_sge = (struct bfa_sge_s *)
+                           (((uint8_t *)drv_fcxp->reqbuf_info) +
+                           (sizeof(struct bfad_buf_info) *
+                                       drv_fcxp->num_req_sgles));
+
+       /* map rsp sg */
+       drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
+                                       job->reply_payload.payload_len,
+                                       &drv_fcxp->num_rsp_sgles);
+       if (!drv_fcxp->rspbuf_info) {
+               printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
+                               bfad->pci_name);
+               rc = -ENOMEM;
+               goto out_free_mem;
+       }
+
+       rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
+       drv_fcxp->rsp_sge = (struct bfa_sge_s  *)
+                           (((uint8_t *)drv_fcxp->rspbuf_info) +
+                           (sizeof(struct bfad_buf_info) *
+                                       drv_fcxp->num_rsp_sgles));
+
+       /* fcxp send */
+       init_completion(&drv_fcxp->comp);
+       rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
+       if (rc == BFA_STATUS_OK) {
+               wait_for_completion(&drv_fcxp->comp);
+               bsg_fcpt->status = drv_fcxp->req_status;
+       } else {
+               bsg_fcpt->status = rc;
+               goto out_free_mem;
+       }
+
+       /* fill the job->reply data */
+       if (drv_fcxp->req_status == BFA_STATUS_OK) {
+               job->reply_len = drv_fcxp->rsp_len;
+               job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+               job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+       } else {
+               job->reply->reply_payload_rcv_len =
+                                       sizeof(struct fc_bsg_ctels_reply);
+               job->reply_len = sizeof(uint32_t);
+               job->reply->reply_data.ctels_reply.status =
+                                               FC_CTELS_STATUS_REJECT;
+       }
+
+       /* Copy the response data to the reply_payload sg list */
+       sg_copy_from_buffer(job->reply_payload.sg_list,
+                           job->reply_payload.sg_cnt,
+                           (uint8_t *)rsp_buf_info->virt,
+                           job->reply_payload.payload_len);
+
+out_free_mem:
+       bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
+                          drv_fcxp->num_rsp_sgles);
+       bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
+                          drv_fcxp->num_req_sgles);
+       kfree(req_kbuf);
+       kfree(rsp_kbuf);
+
+       /* Need a copy to user op */
+       if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
+                        bsg_data->payload_len))
+               rc = -EIO;
+
+       kfree(bsg_fcpt);
+       kfree(drv_fcxp);
+out:
+       job->reply->result = rc;
+
+       if (rc == BFA_STATUS_OK)
+               job->job_done(job);
+
+       return rc;
+}
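
For reference, the request buffer this handler expects is the fc_bsg_request immediately followed by struct bfa_bsg_data, whose payload member is a user-space pointer to a bfa_bsg_fcpt_s; the raw ELS/CT frames travel separately in the request/reply payload sg lists. A hedged sketch of how a caller might lay this out (field values are placeholders, wwn_t is the driver's 64-bit WWN type):

/* sketch only: build the request buffer for an ELS/CT passthru job */
static void build_fcpt_request(unsigned char *rqst, struct bfa_bsg_fcpt_s *fcpt,
                               wwn_t lpwwn, wwn_t dpwwn)
{
        struct fc_bsg_request *bsg_rqst = (struct fc_bsg_request *)rqst;
        struct bfa_bsg_data *bsg_data =
                (struct bfa_bsg_data *)(rqst + sizeof(struct fc_bsg_request));

        memset(fcpt, 0, sizeof(*fcpt));
        bsg_rqst->msgcode = FC_BSG_RPT_ELS;     /* nexus needed: dpwwn must resolve */
        fcpt->vf_id = 0;                        /* placeholder values */
        fcpt->lpwwn = lpwwn;
        fcpt->dpwwn = dpwwn;
        fcpt->tsecs = 10;                       /* passthru timeout, seconds */
        bsg_data->payload_len = sizeof(*fcpt);
        bsg_data->payload = fcpt;               /* driver copy_from_user()s from here */
        /* dout_xferp / din_xferp then carry the raw ELS request and response */
}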
+
+int
+bfad_im_bsg_request(struct fc_bsg_job *job)
+{
+       uint32_t rc = BFA_STATUS_OK;
+
+       switch (job->request->msgcode) {
+       case FC_BSG_HST_VENDOR:
+               /* Process BSG HST Vendor requests */
+               rc = bfad_im_bsg_vendor_request(job);
+               break;
+       case FC_BSG_HST_ELS_NOLOGIN:
+       case FC_BSG_RPT_ELS:
+       case FC_BSG_HST_CT:
+       case FC_BSG_RPT_CT:
+               /* Process BSG ELS/CT commands */
+               rc = bfad_im_bsg_els_ct_request(job);
+               break;
+       default:
+               job->reply->result = rc = -EINVAL;
+               job->reply->reply_payload_rcv_len = 0;
+               break;
+       }
+
+       return rc;
+}
+
+int
+bfad_im_bsg_timeout(struct fc_bsg_job *job)
+{
+       /* Don't complete the BSG job request - return -EAGAIN
+        * to reset the bsg job timeout; for ELS/CT pass-through we
+        * already have a timer to track the request.
+        */
+       return -EAGAIN;
+}
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
new file mode 100644 (file)
index 0000000..e859adb
--- /dev/null
@@ -0,0 +1,746 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef BFAD_BSG_H
+#define BFAD_BSG_H
+
+#include "bfa_defs.h"
+#include "bfa_defs_fcs.h"
+
+/* Definitions of vendor unique structures and command codes passed in
+ * using FC_BSG_HST_VENDOR message code.
+ */
+enum {
+       IOCMD_IOC_ENABLE = 0x1,
+       IOCMD_IOC_DISABLE,
+       IOCMD_IOC_GET_ATTR,
+       IOCMD_IOC_GET_INFO,
+       IOCMD_IOC_GET_STATS,
+       IOCMD_IOC_GET_FWSTATS,
+       IOCMD_IOC_RESET_STATS,
+       IOCMD_IOC_RESET_FWSTATS,
+       IOCMD_IOC_SET_ADAPTER_NAME,
+       IOCMD_IOC_SET_PORT_NAME,
+       IOCMD_IOCFC_GET_ATTR,
+       IOCMD_IOCFC_SET_INTR,
+       IOCMD_PORT_ENABLE,
+       IOCMD_PORT_DISABLE,
+       IOCMD_PORT_GET_ATTR,
+       IOCMD_PORT_GET_STATS,
+       IOCMD_PORT_RESET_STATS,
+       IOCMD_PORT_CFG_TOPO,
+       IOCMD_PORT_CFG_SPEED,
+       IOCMD_PORT_CFG_ALPA,
+       IOCMD_PORT_CFG_MAXFRSZ,
+       IOCMD_PORT_CLR_ALPA,
+       IOCMD_PORT_BBSC_ENABLE,
+       IOCMD_PORT_BBSC_DISABLE,
+       IOCMD_LPORT_GET_ATTR,
+       IOCMD_LPORT_GET_RPORTS,
+       IOCMD_LPORT_GET_STATS,
+       IOCMD_LPORT_RESET_STATS,
+       IOCMD_LPORT_GET_IOSTATS,
+       IOCMD_RPORT_GET_ATTR,
+       IOCMD_RPORT_GET_ADDR,
+       IOCMD_RPORT_GET_STATS,
+       IOCMD_RPORT_RESET_STATS,
+       IOCMD_RPORT_SET_SPEED,
+       IOCMD_VPORT_GET_ATTR,
+       IOCMD_VPORT_GET_STATS,
+       IOCMD_VPORT_RESET_STATS,
+       IOCMD_FABRIC_GET_LPORTS,
+       IOCMD_RATELIM_ENABLE,
+       IOCMD_RATELIM_DISABLE,
+       IOCMD_RATELIM_DEF_SPEED,
+       IOCMD_FCPIM_FAILOVER,
+       IOCMD_FCPIM_MODSTATS,
+       IOCMD_FCPIM_MODSTATSCLR,
+       IOCMD_FCPIM_DEL_ITN_STATS,
+       IOCMD_ITNIM_GET_ATTR,
+       IOCMD_ITNIM_GET_IOSTATS,
+       IOCMD_ITNIM_RESET_STATS,
+       IOCMD_ITNIM_GET_ITNSTATS,
+       IOCMD_IOC_PCIFN_CFG,
+       IOCMD_FCPORT_ENABLE,
+       IOCMD_FCPORT_DISABLE,
+       IOCMD_PCIFN_CREATE,
+       IOCMD_PCIFN_DELETE,
+       IOCMD_PCIFN_BW,
+       IOCMD_ADAPTER_CFG_MODE,
+       IOCMD_PORT_CFG_MODE,
+       IOCMD_FLASH_ENABLE_OPTROM,
+       IOCMD_FLASH_DISABLE_OPTROM,
+       IOCMD_FAA_ENABLE,
+       IOCMD_FAA_DISABLE,
+       IOCMD_FAA_QUERY,
+       IOCMD_CEE_GET_ATTR,
+       IOCMD_CEE_GET_STATS,
+       IOCMD_CEE_RESET_STATS,
+       IOCMD_SFP_MEDIA,
+       IOCMD_SFP_SPEED,
+       IOCMD_FLASH_GET_ATTR,
+       IOCMD_FLASH_ERASE_PART,
+       IOCMD_FLASH_UPDATE_PART,
+       IOCMD_FLASH_READ_PART,
+       IOCMD_DIAG_TEMP,
+       IOCMD_DIAG_MEMTEST,
+       IOCMD_DIAG_LOOPBACK,
+       IOCMD_DIAG_FWPING,
+       IOCMD_DIAG_QUEUETEST,
+       IOCMD_DIAG_SFP,
+       IOCMD_DIAG_LED,
+       IOCMD_DIAG_BEACON_LPORT,
+       IOCMD_DIAG_LB_STAT,
+       IOCMD_PHY_GET_ATTR,
+       IOCMD_PHY_GET_STATS,
+       IOCMD_PHY_UPDATE_FW,
+       IOCMD_PHY_READ_FW,
+       IOCMD_VHBA_QUERY,
+       IOCMD_DEBUG_PORTLOG,
+       IOCMD_DEBUG_FW_CORE,
+       IOCMD_DEBUG_FW_STATE_CLR,
+       IOCMD_DEBUG_PORTLOG_CLR,
+       IOCMD_DEBUG_START_DTRC,
+       IOCMD_DEBUG_STOP_DTRC,
+       IOCMD_DEBUG_PORTLOG_CTL,
+       IOCMD_FCPIM_PROFILE_ON,
+       IOCMD_FCPIM_PROFILE_OFF,
+       IOCMD_ITNIM_GET_IOPROFILE,
+       IOCMD_FCPORT_GET_STATS,
+       IOCMD_FCPORT_RESET_STATS,
+       IOCMD_BOOT_CFG,
+       IOCMD_BOOT_QUERY,
+       IOCMD_PREBOOT_QUERY,
+       IOCMD_ETHBOOT_CFG,
+       IOCMD_ETHBOOT_QUERY,
+       IOCMD_TRUNK_ENABLE,
+       IOCMD_TRUNK_DISABLE,
+       IOCMD_TRUNK_GET_ATTR,
+       IOCMD_QOS_ENABLE,
+       IOCMD_QOS_DISABLE,
+       IOCMD_QOS_GET_ATTR,
+       IOCMD_QOS_GET_VC_ATTR,
+       IOCMD_QOS_GET_STATS,
+       IOCMD_QOS_RESET_STATS,
+       IOCMD_VF_GET_STATS,
+       IOCMD_VF_RESET_STATS,
+       IOCMD_FCPIM_LUNMASK_ENABLE,
+       IOCMD_FCPIM_LUNMASK_DISABLE,
+       IOCMD_FCPIM_LUNMASK_CLEAR,
+       IOCMD_FCPIM_LUNMASK_QUERY,
+       IOCMD_FCPIM_LUNMASK_ADD,
+       IOCMD_FCPIM_LUNMASK_DELETE,
+};
+
+struct bfa_bsg_gen_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+};
+
+struct bfa_bsg_portlogctl_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       bfa_boolean_t   ctl;
+       int             inst_no;
+};
+
+struct bfa_bsg_fcpim_profile_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+};
+
+struct bfa_bsg_itnim_ioprofile_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           lpwwn;
+       wwn_t           rpwwn;
+       struct bfa_itnim_ioprofile_s ioprofile;
+};
+
+struct bfa_bsg_fcport_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       union bfa_fcport_stats_u stats;
+};
+
+struct bfa_bsg_ioc_name_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       char            name[BFA_ADAPTER_SYM_NAME_LEN];
+};
+
+struct bfa_bsg_ioc_info_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       char            serialnum[64];
+       char            hwpath[BFA_STRING_32];
+       char            adapter_hwpath[BFA_STRING_32];
+       char            guid[BFA_ADAPTER_SYM_NAME_LEN*2];
+       char            name[BFA_ADAPTER_SYM_NAME_LEN];
+       char            port_name[BFA_ADAPTER_SYM_NAME_LEN];
+       char            eth_name[BFA_ADAPTER_SYM_NAME_LEN];
+       wwn_t           pwwn;
+       wwn_t           nwwn;
+       wwn_t           factorypwwn;
+       wwn_t           factorynwwn;
+       mac_t           mac;
+       mac_t           factory_mac; /* Factory mac address */
+       mac_t           current_mac; /* Currently assigned mac address */
+       enum bfa_ioc_type_e     ioc_type;
+       u16             pvid; /* Port vlan id */
+       u16             rsvd1;
+       u32             host;
+       u32             bandwidth; /* For PF support */
+       u32             rsvd2;
+};
+
+struct bfa_bsg_ioc_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_ioc_attr_s  ioc_attr;
+};
+
+struct bfa_bsg_ioc_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_ioc_stats_s ioc_stats;
+};
+
+struct bfa_bsg_ioc_fwstats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             buf_size;
+       u32             rsvd1;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_iocfc_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_iocfc_attr_s iocfc_attr;
+};
+
+struct bfa_bsg_iocfc_intr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_iocfc_intr_attr_s attr;
+};
+
+struct bfa_bsg_port_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_port_attr_s  attr;
+};
+
+struct bfa_bsg_port_cfg_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             param;
+       u32             rsvd1;
+};
+
+struct bfa_bsg_port_cfg_maxfrsize_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             maxfrsize;
+};
+
+struct bfa_bsg_port_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             buf_size;
+       u32             rsvd1;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_lport_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       struct bfa_lport_attr_s port_attr;
+};
+
+struct bfa_bsg_lport_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       struct bfa_lport_stats_s port_stats;
+};
+
+struct bfa_bsg_lport_iostats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_lport_get_rports_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       u64             rbuf_ptr;
+       u32             nrports;
+       u32             rsvd;
+};
+
+struct bfa_bsg_rport_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       wwn_t           rpwwn;
+       struct bfa_rport_attr_s attr;
+};
+
+struct bfa_bsg_rport_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       wwn_t           rpwwn;
+       struct bfa_rport_stats_s stats;
+};
+
+struct bfa_bsg_rport_scsi_addr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       wwn_t           rpwwn;
+       u32             host;
+       u32             bus;
+       u32             target;
+       u32             lun;
+};
+
+struct bfa_bsg_rport_reset_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       wwn_t           rpwwn;
+};
+
+struct bfa_bsg_rport_set_speed_s {
+       bfa_status_t            status;
+       u16                     bfad_num;
+       u16                     vf_id;
+       enum bfa_port_speed     speed;
+       u32                     rsvd;
+       wwn_t                   pwwn;
+       wwn_t                   rpwwn;
+};
+
+struct bfa_bsg_vport_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           vpwwn;
+       struct bfa_vport_attr_s vport_attr;
+};
+
+struct bfa_bsg_vport_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           vpwwn;
+       struct bfa_vport_stats_s vport_stats;
+};
+
+struct bfa_bsg_reset_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           vpwwn;
+};
+
+struct bfa_bsg_fabric_get_lports_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       u64             buf_ptr;
+       u32             nports;
+       u32             rsvd;
+};
+
+struct bfa_bsg_trl_speed_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_fcpim_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             param;
+};
+
+struct bfa_bsg_fcpim_modstats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       struct bfa_itnim_iostats_s modstats;
+};
+
+struct bfa_bsg_fcpim_del_itn_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       struct bfa_fcpim_del_itn_stats_s modstats;
+};
+
+struct bfa_bsg_fcpim_modstatsclr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+};
+
+struct bfa_bsg_itnim_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           lpwwn;
+       wwn_t           rpwwn;
+       struct bfa_itnim_attr_s attr;
+};
+
+struct bfa_bsg_itnim_iostats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           lpwwn;
+       wwn_t           rpwwn;
+       struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_itnim_itnstats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           lpwwn;
+       wwn_t           rpwwn;
+       struct bfa_itnim_stats_s itnstats;
+};
+
+struct bfa_bsg_pcifn_cfg_s {
+       bfa_status_t            status;
+       u16                     bfad_num;
+       u16                     rsvd;
+       struct bfa_ablk_cfg_s   pcifn_cfg;
+};
+
+struct bfa_bsg_pcifn_s {
+       bfa_status_t            status;
+       u16                     bfad_num;
+       u16                     pcifn_id;
+       u32                     bandwidth;
+       u8                      port;
+       enum bfi_pcifn_class    pcifn_class;
+       u8                      rsvd[1];
+};
+
+struct bfa_bsg_adapter_cfg_mode_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_adapter_cfg_mode_s   cfg;
+};
+
+struct bfa_bsg_port_cfg_mode_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             instance;
+       struct bfa_port_cfg_mode_s cfg;
+};
+
+struct bfa_bsg_faa_attr_s {
+       bfa_status_t            status;
+       u16                     bfad_num;
+       u16                     rsvd;
+       struct bfa_faa_attr_s   faa_attr;
+};
+
+struct bfa_bsg_cee_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             buf_size;
+       u32             rsvd1;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_cee_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             buf_size;
+       u32             rsvd1;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_sfp_media_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       enum bfa_defs_sfp_media_e media;
+};
+
+struct bfa_bsg_sfp_speed_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_flash_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_flash_attr_s attr;
+};
+
+struct bfa_bsg_flash_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u8              instance;
+       u8              rsvd;
+       enum  bfa_flash_part_type type;
+       int             bufsz;
+       u64             buf_ptr;
+};
+
+struct bfa_bsg_diag_get_temp_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_diag_results_tempsensor_s result;
+};
+
+struct bfa_bsg_diag_memtest_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd[3];
+       u32             pat;
+       struct bfa_diag_memtest_result result;
+       struct bfa_diag_memtest_s memtest;
+};
+
+struct bfa_bsg_diag_loopback_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       enum bfa_port_opmode opmode;
+       enum bfa_port_speed speed;
+       u32             lpcnt;
+       u32             pat;
+       struct bfa_diag_loopback_result_s result;
+};
+
+struct bfa_bsg_diag_fwping_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             cnt;
+       u32             pattern;
+       struct bfa_diag_results_fwping result;
+};
+
+struct bfa_bsg_diag_qtest_s {
+       bfa_status_t    status;
+       u16     bfad_num;
+       u16     rsvd;
+       u32     force;
+       u32     queue;
+       struct bfa_diag_qtest_result_s result;
+};
+
+struct bfa_bsg_sfp_show_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct sfp_mem_s sfp;
+};
+
+struct bfa_bsg_diag_led_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_diag_ledtest_s ledtest;
+};
+
+struct bfa_bsg_diag_beacon_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       bfa_boolean_t   beacon;
+       bfa_boolean_t   link_e2e_beacon;
+       u32             second;
+};
+
+struct bfa_bsg_diag_lb_stat_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+};
+
+struct bfa_bsg_phy_attr_s {
+       bfa_status_t    status;
+       u16     bfad_num;
+       u16     instance;
+       struct bfa_phy_attr_s   attr;
+};
+
+struct bfa_bsg_phy_s {
+       bfa_status_t    status;
+       u16     bfad_num;
+       u16     instance;
+       u64     bufsz;
+       u64     buf_ptr;
+};
+
+struct bfa_bsg_debug_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       u32             bufsz;
+       int             inst_no;
+       u64             buf_ptr;
+       u64             offset;
+};
+
+struct bfa_bsg_phy_stats_s {
+       bfa_status_t    status;
+       u16     bfad_num;
+       u16     instance;
+       struct bfa_phy_stats_s  stats;
+};
+
+struct bfa_bsg_vhba_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             pcifn_id;
+       struct bfa_vhba_attr_s  attr;
+};
+
+struct bfa_bsg_boot_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_boot_cfg_s   cfg;
+};
+
+struct bfa_bsg_preboot_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_boot_pbc_s   cfg;
+};
+
+struct bfa_bsg_ethboot_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct  bfa_ethboot_cfg_s  cfg;
+};
+
+struct bfa_bsg_trunk_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_trunk_attr_s attr;
+};
+
+struct bfa_bsg_qos_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_qos_attr_s   attr;
+};
+
+struct bfa_bsg_qos_vc_attr_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             rsvd;
+       struct bfa_qos_vc_attr_s attr;
+};
+
+struct bfa_bsg_vf_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       struct bfa_vf_stats_s   stats;
+};
+
+struct bfa_bsg_vf_reset_stats_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+};
+
+struct bfa_bsg_fcpim_lunmask_query_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       struct bfa_lunmask_cfg_s lun_mask;
+};
+
+struct bfa_bsg_fcpim_lunmask_s {
+       bfa_status_t    status;
+       u16             bfad_num;
+       u16             vf_id;
+       wwn_t           pwwn;
+       wwn_t           rpwwn;
+       struct scsi_lun lun;
+};
+
+struct bfa_bsg_fcpt_s {
+       bfa_status_t    status;
+       u16             vf_id;
+       wwn_t           lpwwn;
+       wwn_t           dpwwn;
+       u32             tsecs;
+       int             cts;
+       enum fc_cos     cos;
+       struct fchs_s   fchs;
+};
+#define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s
+
+struct bfa_bsg_data {
+       int payload_len;
+       void *payload;
+};
+
+#define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz)     \
+       (((__payload_len) != ((__hdrsz) + (__bufsz))) ?         \
+        BFA_STATUS_FAILED : BFA_STATUS_OK)
+
+#endif /* BFAD_BSG_H */
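
As a usage illustration of the size-check macro above (hedged; the exact call sites live in bfad_bsg.c, much of which is not shown in this diff), bfad_chk_iocmd_sz lets a handler verify that the payload it received is exactly the fixed iocmd header plus the caller-declared variable buffer before it touches buf_ptr:

/* sketch: validating a firmware-stats request before any copy */
struct bfa_bsg_ioc_fwstats_s *iocmd = (struct bfa_bsg_ioc_fwstats_s *)payload_kbuf;

if (bfad_chk_iocmd_sz(payload_len,
                      sizeof(struct bfa_bsg_ioc_fwstats_s),
                      iocmd->buf_size) != BFA_STATUS_OK) {
        iocmd->status = BFA_STATUS_VERSION_FAIL;        /* status code assumed */
        return -EINVAL;         /* reject mismatched sizes up front */
}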
index 48be0c54f2de8844e6844e067304cc3868b10cee..b412e0300dd4756cc6549974ec9368c9c50eb794 100644 (file)
@@ -214,10 +214,10 @@ bfad_debugfs_read(struct file *file, char __user *buf,
 
 #define BFA_REG_CT_ADDRSZ      (0x40000)
 #define BFA_REG_CB_ADDRSZ      (0x20000)
-#define BFA_REG_ADDRSZ(__bfa)  \
-       ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ?       \
-               BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
-#define BFA_REG_ADDRMSK(__bfa)  ((u32)(BFA_REG_ADDRSZ(__bfa) - 1))
+#define BFA_REG_ADDRSZ(__ioc)  \
+       ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ?  \
+        BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
 
 static bfa_status_t
 bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
@@ -236,7 +236,7 @@ bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
                        return BFA_STATUS_EINVAL;
        } else {
                /* CB register space 64KB */
-               if ((offset + (len<<2)) > BFA_REG_ADDRMSK(bfa))
+               if ((offset + (len<<2)) > BFA_REG_ADDRMSK(&bfa->ioc))
                        return BFA_STATUS_EINVAL;
        }
        return BFA_STATUS_OK;
@@ -317,7 +317,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
 
        bfad->reglen = len << 2;
        rb = bfa_ioc_bar0(ioc);
-       addr &= BFA_REG_ADDRMSK(bfa);
+       addr &= BFA_REG_ADDRMSK(ioc);
 
        /* offset and len sanity check */
        rc = bfad_reg_offset_check(bfa, addr, len);
@@ -380,7 +380,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
        }
        kfree(kern_buf);
 
-       addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */
+       addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
 
        /* offset and len sanity check */
        rc = bfad_reg_offset_check(bfa, addr, 1);
index bcba1827ef54c469000a37b8c60e7c126e0342e2..bda999ad9f5232a61981b96a4c6bae9b80437c0e 100644 (file)
@@ -27,6 +27,7 @@
 #define __BFAD_DRV_H__
 
 #include <linux/types.h>
+#include <linux/version.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/idr.h>
@@ -42,6 +43,7 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_transport.h>
+#include <scsi/scsi_bsg_fc.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -54,7 +56,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "2.3.2.3"
+#define BFAD_DRIVER_VERSION    "3.0.2.2"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
@@ -78,7 +80,7 @@
 #define BFAD_HAL_INIT_FAIL                     0x00000100
 #define BFAD_FC4_PROBE_DONE                    0x00000200
 #define BFAD_PORT_DELETE                       0x00000001
-
+#define BFAD_INTX_ON                           0x00000400
 /*
  * BFAD related definition
  */
@@ -91,6 +93,8 @@
  */
 #define BFAD_LUN_QUEUE_DEPTH   32
 #define BFAD_IO_MAX_SGE                SG_ALL
+#define BFAD_MIN_SECTORS       128 /* 64k   */
+#define BFAD_MAX_SECTORS       0xFFFF  /* 32 MB */
 
 #define bfad_isr_t irq_handler_t
 
@@ -109,6 +113,7 @@ struct bfad_msix_s {
 enum {
        BFA_TRC_LDRV_BFAD               = 1,
        BFA_TRC_LDRV_IM                 = 2,
+       BFA_TRC_LDRV_BSG                = 3,
 };
 
 enum bfad_port_pvb_type {
@@ -188,8 +193,10 @@ struct bfad_s {
        struct bfa_pcidev_s hal_pcidev;
        struct bfa_ioc_pci_attr_s pci_attr;
        void __iomem   *pci_bar0_kva;
+       void __iomem   *pci_bar2_kva;
        struct completion comp;
        struct completion suspend;
+       struct completion enable_comp;
        struct completion disable_comp;
        bfa_boolean_t   disable_active;
        struct bfad_port_s     pport;   /* physical port of the BFAD */
@@ -217,6 +224,10 @@ struct bfad_s {
        char *regdata;
        u32 reglen;
        struct dentry *bfad_dentry_files[5];
+       struct list_head        free_aen_q;
+       struct list_head        active_aen_q;
+       struct bfa_aen_entry_s  aen_list[BFA_AEN_MAX_ENTRY];
+       spinlock_t              bfad_aen_spinlock;
 };
 
 /* BFAD state machine events */
@@ -272,21 +283,6 @@ struct bfad_hal_comp {
        struct completion comp;
 };
 
-/*
- * Macro to obtain the immediate lower power
- * of two for the integer.
- */
-#define nextLowerInt(x)                         \
-do {                                            \
-       int __i;                                  \
-       (*x)--;                                 \
-       for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \
-               (*x) = (*x) | (*x) >> __i;      \
-       (*x)++;                                 \
-       (*x) = (*x) >> 1;                       \
-} while (0)
-
-
 #define BFA_LOG(level, bfad, mask, fmt, arg...)                                \
 do {                                                                   \
        if (((mask) == 4) || (level[1] <= '4'))                         \
@@ -353,6 +349,7 @@ extern int      msix_disable_ct;
 extern int      fdmi_enable;
 extern int      supported_fc4s;
 extern int     pcie_max_read_reqsz;
+extern int     max_xfer_size;
 extern int bfa_debugfs_enable;
 extern struct mutex bfad_mutex;
 
index c2b36179e8e88e26c53e0783ce56b1ba406a4173..01312381639f74415d1ebd2ca10c1752308fb922 100644 (file)
@@ -175,21 +175,11 @@ bfad_im_info(struct Scsi_Host *shost)
        struct bfad_im_port_s *im_port =
                        (struct bfad_im_port_s *) shost->hostdata[0];
        struct bfad_s *bfad = im_port->bfad;
-       struct bfa_s *bfa = &bfad->bfa;
-       struct bfa_ioc_s *ioc = &bfa->ioc;
-       char model[BFA_ADAPTER_MODEL_NAME_LEN];
-
-       bfa_get_adapter_model(bfa, model);
 
        memset(bfa_buf, 0, sizeof(bfa_buf));
-       if (ioc->ctdev && !ioc->fcmode)
-               snprintf(bfa_buf, sizeof(bfa_buf),
-               "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
-                model, bfad->pci_name, BFAD_DRIVER_VERSION);
-       else
-               snprintf(bfa_buf, sizeof(bfa_buf),
-               "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
-               model, bfad->pci_name, BFAD_DRIVER_VERSION);
+       snprintf(bfa_buf, sizeof(bfa_buf),
+               "Brocade FC/FCOE Adapter, " "hwpath: %s driver: %s",
+               bfad->pci_name, BFAD_DRIVER_VERSION);
 
        return bfa_buf;
 }
@@ -572,9 +562,6 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
                goto out_fc_rel;
        }
 
-       /* setup host fixed attribute if the lk supports */
-       bfad_fc_host_init(im_port);
-
        return 0;
 
 out_fc_rel:
@@ -669,6 +656,31 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
+static void bfad_aen_im_notify_handler(struct work_struct *work)
+{
+       struct bfad_im_s *im =
+               container_of(work, struct bfad_im_s, aen_im_notify_work);
+       struct bfa_aen_entry_s *aen_entry;
+       struct bfad_s *bfad = im->bfad;
+       struct Scsi_Host *shost = bfad->pport.im_port->shost;
+       void *event_data;
+       unsigned long flags;
+
+       while (!list_empty(&bfad->active_aen_q)) {
+               spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+               bfa_q_deq(&bfad->active_aen_q, &aen_entry);
+               spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+               event_data = (char *)aen_entry + sizeof(struct list_head);
+               fc_host_post_vendor_event(shost, fc_get_event_number(),
+                               sizeof(struct bfa_aen_entry_s) -
+                               sizeof(struct list_head),
+                               (char *)event_data, BFAD_NL_VENDOR_ID);
+               spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+               list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
+               spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+       }
+}
+
 bfa_status_t
 bfad_im_probe(struct bfad_s *bfad)
 {
@@ -689,6 +701,7 @@ bfad_im_probe(struct bfad_s *bfad)
                rc = BFA_STATUS_FAILED;
        }
 
+       INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
 ext:
        return rc;
 }
@@ -713,6 +726,9 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
        else
                sht = &bfad_im_vport_template;
 
+       if (max_xfer_size != BFAD_MAX_SECTORS >> 1)
+               sht->max_sectors = max_xfer_size << 1;
+
        sht->sg_tablesize = bfad->cfg_data.io_max_sge;
 
        return scsi_host_alloc(sht, sizeof(unsigned long));
@@ -790,7 +806,8 @@ struct scsi_host_template bfad_im_scsi_host_template = {
        .cmd_per_lun = 3,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = bfad_im_host_attrs,
-       .max_sectors = 0xFFFF,
+       .max_sectors = BFAD_MAX_SECTORS,
+       .vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
 };
 
 struct scsi_host_template bfad_im_vport_template = {
@@ -811,7 +828,7 @@ struct scsi_host_template bfad_im_vport_template = {
        .cmd_per_lun = 3,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = bfad_im_vport_attrs,
-       .max_sectors = 0xFFFF,
+       .max_sectors = BFAD_MAX_SECTORS,
 };
 
 bfa_status_t
@@ -925,7 +942,10 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
                return 0;
 
        bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
-       if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
+       if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS)
+               supported_speed |=  FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+                               FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT;
+       else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
                if (ioc_attr->adapter_attr.is_mezz) {
                        supported_speed |= FC_PORTSPEED_8GBIT |
                                FC_PORTSPEED_4GBIT |
index c296c8968511b72e9a7d2132540fda7baadce65f..004b6cf848d943288934452237c1cfa2ef8b8cfd 100644 (file)
@@ -115,8 +115,30 @@ struct bfad_im_s {
        struct bfad_s         *bfad;
        struct workqueue_struct *drv_workq;
        char            drv_workq_name[KOBJ_NAME_LEN];
+       struct work_struct      aen_im_notify_work;
 };
 
+#define bfad_get_aen_entry(_drv, _entry) do {                          \
+       unsigned long   _flags;                                         \
+       spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags);          \
+       bfa_q_deq(&(_drv)->free_aen_q, &(_entry));                      \
+       if (_entry)                                                     \
+               list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q);    \
+       spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags);     \
+} while (0)
+
+/* post fc_host vendor event */
+#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do {       \
+       do_gettimeofday(&(_entry)->aen_tv);                                   \
+       (_entry)->bfad_num = (_drv)->inst_no;                                 \
+       (_entry)->seq_num = (_cnt);                                           \
+       (_entry)->aen_category = (_cat);                                      \
+       (_entry)->aen_type = (_evt);                                          \
+       if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE)                         \
+               queue_work((_drv)->im->drv_workq,                             \
+                          &(_drv)->im->aen_im_notify_work);                  \
+} while (0)
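
A hedged sketch of how these two macros are intended to pair up when the driver raises an event; the category/event constants and the aen_data field shown are assumptions based on the surrounding bfa_defs definitions, not on this hunk.

/* sketch: dequeue a free AEN entry, fill it, and hand it to the notify work */
static void sketch_post_port_online(struct bfad_s *bfad, wwn_t pwwn)
{
        struct bfa_aen_entry_s *aen_entry;
        static int aen_seq;                     /* illustrative sequence counter */

        bfad_get_aen_entry(bfad, aen_entry);
        if (!aen_entry)
                return;                         /* free_aen_q exhausted */

        aen_entry->aen_data.port.pwwn = pwwn;   /* event-specific payload (assumed field) */
        bfad_im_post_vendor_event(aen_entry, bfad, ++aen_seq,
                                  BFA_AEN_CAT_PORT, BFA_PORT_AEN_ONLINE);
}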
+
 struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
                                struct bfad_s *);
 bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
@@ -141,4 +163,7 @@ extern struct device_attribute *bfad_im_vport_attrs[];
 
 irqreturn_t bfad_intx(int irq, void *dev_id);
 
+int bfad_im_bsg_request(struct fc_bsg_job *job);
+int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+
 #endif
index 72b69a0c3b516eb01091ddb07a0599b889666969..b2ba0b2e91b2cdfe03bed11ce417c95b880313e7 100644 (file)
 
 #pragma pack(1)
 
+/* Per dma segment max size */
+#define BFI_MEM_DMA_SEG_SZ     (131072)
+
+/* Get number of dma segments required */
+#define BFI_MEM_DMA_NSEGS(_num_reqs, _req_sz)                          \
+       ((u16)(((((_num_reqs) * (_req_sz)) + BFI_MEM_DMA_SEG_SZ - 1) &  \
+        ~(BFI_MEM_DMA_SEG_SZ - 1)) / BFI_MEM_DMA_SEG_SZ))
+
+/* Get num dma reqs - that fit in a segment */
+#define BFI_MEM_NREQS_SEG(_rqsz) (BFI_MEM_DMA_SEG_SZ / (_rqsz))
+
+/* Get segment num from tag */
+#define BFI_MEM_SEG_FROM_TAG(_tag, _rqsz) ((_tag) / BFI_MEM_NREQS_SEG(_rqsz))
+
+/* Get dma req offset in a segment */
+#define BFI_MEM_SEG_REQ_OFFSET(_tag, _sz)      \
+       ((_tag) - (BFI_MEM_SEG_FROM_TAG(_tag, _sz) * BFI_MEM_NREQS_SEG(_sz)))
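
Worked example of the segment math above, using the FCP sense-buffer numbers defined later in this hunk (BFI_IO_MAX = 2000 requests of BFI_IOIM_SNSLEN = 256 bytes each):

/* 2000 * 256 = 512000 bytes, carved into 128 KiB DMA segments:
 *   BFI_MEM_DMA_NSEGS(2000, 256)      = 4    (BFI_IOIM_SNSBUF_SEGS)
 *   BFI_MEM_NREQS_SEG(256)            = 512  requests per segment
 *   BFI_MEM_SEG_FROM_TAG(1000, 256)   = 1    (tags 512..1023 live in segment 1)
 *   BFI_MEM_SEG_REQ_OFFSET(1000, 256) = 488  (request index within segment 1)
 */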
+
 /*
  * BFI FW image type
  */
 #define        BFI_FLASH_CHUNK_SZ                      256     /*  Flash chunk size */
 #define        BFI_FLASH_CHUNK_SZ_WORDS        (BFI_FLASH_CHUNK_SZ/sizeof(u32))
-enum {
-       BFI_IMAGE_CB_FC,
-       BFI_IMAGE_CT_FC,
-       BFI_IMAGE_CT_CNA,
-       BFI_IMAGE_MAX,
-};
 
 /*
  * Msg header common to all msgs
@@ -43,17 +55,20 @@ struct bfi_mhdr_s {
        u8              msg_id;         /*  msg opcode with in the class   */
        union {
                struct {
-                       u8      rsvd;
-                       u8      lpu_id; /*  msg destination                 */
+                       u8      qid;
+                       u8      fn_lpu; /*  msg destination                 */
                } h2i;
                u16     i2htok; /*  token in msgs to host           */
        } mtag;
 };
 
-#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do {                \
+#define bfi_fn_lpu(__fn, __lpu)        ((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh)     ((_mh)->mtag.h2i.fn_lpu >> 1)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do {               \
        (_mh).msg_class         = (_mc);      \
        (_mh).msg_id            = (_op);      \
-       (_mh).mtag.h2i.lpu_id   = (_lpuid);      \
+       (_mh).mtag.h2i.fn_lpu   = (_fn_lpu);      \
 } while (0)
 
 #define bfi_i2h_set(_mh, _mc, _op, _i2htok) do {               \
@@ -101,7 +116,7 @@ union bfi_addr_u {
 };
 
 /*
- * Scatter Gather Element
+ * Scatter Gather Element used for fast-path IO requests
  */
 struct bfi_sge_s {
 #ifdef __BIG_ENDIAN
@@ -116,6 +131,14 @@ struct bfi_sge_s {
        union bfi_addr_u sga;
 };
 
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen_s {
+       union bfi_addr_u        al_addr;        /* DMA addr of buffer   */
+       u32                     al_len;         /* length of buffer     */
+};
+
 /*
  * Scatter Gather Page
  */
@@ -127,6 +150,12 @@ struct bfi_sgpg_s {
        u32     rsvd[BFI_SGPG_RSVD_WD_LEN];
 };
 
+/* FCP module definitions */
+#define BFI_IO_MAX     (2000)
+#define BFI_IOIM_SNSLEN        (256)
+#define BFI_IOIM_SNSBUF_SEGS   \
+       BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN)
+
 /*
  * Large Message structure - 128 Bytes size Msgs
  */
@@ -148,19 +177,30 @@ struct bfi_mbmsg_s {
        u32             pl[BFI_MBMSG_SZ];
 };
 
+/*
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+       BFI_PCIFN_CLASS_FC  = 0x0c04,
+       BFI_PCIFN_CLASS_ETH = 0x0200,
+};
+
 /*
  * Message Classes
  */
 enum bfi_mclass {
        BFI_MC_IOC              = 1,    /*  IO Controller (IOC)     */
+       BFI_MC_DIAG             = 2,    /*  Diagnostic Msgs            */
+       BFI_MC_FLASH            = 3,    /*  Flash message class */
+       BFI_MC_CEE              = 4,    /*  CEE */
        BFI_MC_FCPORT           = 5,    /*  FC port                         */
        BFI_MC_IOCFC            = 6,    /*  FC - IO Controller (IOC)        */
-       BFI_MC_LL               = 7,    /*  Link Layer                      */
+       BFI_MC_ABLK             = 7,    /*  ASIC block configuration        */
        BFI_MC_UF               = 8,    /*  Unsolicited frame receive       */
        BFI_MC_FCXP             = 9,    /*  FC Transport                    */
        BFI_MC_LPS              = 10,   /*  lport fc login services         */
        BFI_MC_RPORT            = 11,   /*  Remote port             */
-       BFI_MC_ITNIM            = 12,   /*  I-T nexus (Initiator mode)      */
+       BFI_MC_ITN              = 12,   /*  I-T nexus (Initiator mode)      */
        BFI_MC_IOIM_READ        = 13,   /*  read IO (Initiator mode)        */
        BFI_MC_IOIM_WRITE       = 14,   /*  write IO (Initiator mode)       */
        BFI_MC_IOIM_IO          = 15,   /*  IO (Initiator mode)     */
@@ -168,6 +208,8 @@ enum bfi_mclass {
        BFI_MC_IOIM_IOCOM       = 17,   /*  good IO completion              */
        BFI_MC_TSKIM            = 18,   /*  Initiator Task management       */
        BFI_MC_PORT             = 21,   /*  Physical port                   */
+       BFI_MC_SFP              = 22,   /*  SFP module  */
+       BFI_MC_PHY              = 25,   /*  External PHY message class  */
        BFI_MC_MAX              = 32
 };
 
@@ -175,23 +217,28 @@ enum bfi_mclass {
 #define BFI_IOC_MAX_CQS_ASIC   8
 #define BFI_IOC_MSGLEN_MAX     32      /* 32 bytes */
 
-#define BFI_BOOT_TYPE_OFF              8
-#define BFI_BOOT_LOADER_OFF            12
-
-#define BFI_BOOT_TYPE_NORMAL           0
-#define        BFI_BOOT_TYPE_FLASH             1
-#define        BFI_BOOT_TYPE_MEMTEST           2
-
-#define BFI_BOOT_LOADER_OS             0
-#define BFI_BOOT_LOADER_BIOS           1
-#define BFI_BOOT_LOADER_UEFI           2
-
 /*
  *----------------------------------------------------------------------
  *                             IOC
  *----------------------------------------------------------------------
  */
 
+/*
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+       BFI_ASIC_GEN_CB         = 1,    /* crossbow 8G FC               */
+       BFI_ASIC_GEN_CT         = 2,    /* catapult 8G FC or 10G CNA    */
+       BFI_ASIC_GEN_CT2        = 3,    /* catapult-2 16G FC or 10G CNA */
+};
+
+enum bfi_asic_mode {
+       BFI_ASIC_MODE_FC        = 1,    /* FC upto 8G speed             */
+       BFI_ASIC_MODE_FC16      = 2,    /* FC upto 16G speed            */
+       BFI_ASIC_MODE_ETH       = 3,    /* Ethernet ports               */
+       BFI_ASIC_MODE_COMBO     = 4,    /* FC 16G and Ethernet 10G port */
+};
+
 enum bfi_ioc_h2i_msgs {
        BFI_IOC_H2I_ENABLE_REQ          = 1,
        BFI_IOC_H2I_DISABLE_REQ         = 2,
@@ -204,8 +251,8 @@ enum bfi_ioc_i2h_msgs {
        BFI_IOC_I2H_ENABLE_REPLY        = BFA_I2HM(1),
        BFI_IOC_I2H_DISABLE_REPLY       = BFA_I2HM(2),
        BFI_IOC_I2H_GETATTR_REPLY       = BFA_I2HM(3),
-       BFI_IOC_I2H_READY_EVENT         = BFA_I2HM(4),
-       BFI_IOC_I2H_HBEAT               = BFA_I2HM(5),
+       BFI_IOC_I2H_HBEAT               = BFA_I2HM(4),
+       BFI_IOC_I2H_ACQ_ADDR_REPLY      = BFA_I2HM(5),
 };
 
 /*
@@ -220,7 +267,8 @@ struct bfi_ioc_attr_s {
        wwn_t           mfg_pwwn;       /*  Mfg port wwn           */
        wwn_t           mfg_nwwn;       /*  Mfg node wwn           */
        mac_t           mfg_mac;        /*  Mfg mac                */
-       u16     rsvd_a;
+       u8              port_mode;      /* bfi_port_mode           */
+       u8              rsvd_a;
        wwn_t           pwwn;
        wwn_t           nwwn;
        mac_t           mac;            /*  PBC or Mfg mac         */
@@ -272,21 +320,33 @@ struct bfi_ioc_getattr_reply_s {
 #define BFI_IOC_FW_SIGNATURE   (0xbfadbfad)
 #define BFI_IOC_MD5SUM_SZ      4
 struct bfi_ioc_image_hdr_s {
-       u32     signature;      /*  constant signature */
-       u32     rsvd_a;
-       u32     exec;           /*  exec vector */
-       u32     param;          /*  parameters          */
+       u32     signature;      /* constant signature           */
+       u8      asic_gen;       /* asic generation              */
+       u8      asic_mode;
+       u8      port0_mode;     /* device mode for port 0       */
+       u8      port1_mode;     /* device mode for port 1       */
+       u32     exec;           /* exec vector                  */
+       u32     bootenv;        /* firmware boot env            */
        u32     rsvd_b[4];
        u32     md5sum[BFI_IOC_MD5SUM_SZ];
 };
 
-/*
- *  BFI_IOC_I2H_READY_EVENT message
- */
-struct bfi_ioc_rdy_event_s {
-       struct bfi_mhdr_s       mh;             /*  common msg header */
-       u8                      init_status;    /*  init event status */
-       u8                      rsvd[3];
+#define BFI_FWBOOT_DEVMODE_OFF         4
+#define BFI_FWBOOT_TYPE_OFF            8
+#define BFI_FWBOOT_ENV_OFF             12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+       (((u32)(__asic_gen)) << 24 |            \
+        ((u32)(__asic_mode)) << 16 |           \
+        ((u32)(__p0_mode)) << 8 |              \
+        ((u32)(__p1_mode)))
+
+#define BFI_FWBOOT_TYPE_NORMAL 0
+#define BFI_FWBOOT_TYPE_MEMTEST        2
+#define BFI_FWBOOT_ENV_OS       0
+
+enum bfi_port_mode {
+       BFI_PORT_MODE_FC        = 1,
+       BFI_PORT_MODE_ETH       = 2,
 };
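
The BFI_FWBOOT_* offsets and the BFI_FWBOOT_DEVMODE() packing macro introduced in this hunk replace the old BFI_BOOT_* constants. A minimal sketch of how the packed device-mode word could be built, assuming the definitions above and the kernel u32 type; the actual write into firmware scratch memory is elided.

/* Hedged sketch: pack per-port device modes for firmware boot. */
static u32 example_fwboot_devmode(void)
{
        /*
         * CT2 ASIC, combo personality: port 0 runs FC, port 1 runs Ethernet.
         * The returned word would be stored at BFI_FWBOOT_DEVMODE_OFF;
         * boot type and boot environment go to BFI_FWBOOT_TYPE_OFF and
         * BFI_FWBOOT_ENV_OFF respectively.
         */
        return BFI_FWBOOT_DEVMODE(BFI_ASIC_GEN_CT2, BFI_ASIC_MODE_COMBO,
                                  BFI_PORT_MODE_FC, BFI_PORT_MODE_ETH);
}
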
 
 struct bfi_ioc_hbeat_s {
@@ -345,8 +405,8 @@ enum {
  */
 struct bfi_ioc_ctrl_req_s {
        struct bfi_mhdr_s       mh;
-       u8                      ioc_class;
-       u8                      rsvd[3];
+       u16                     clscode;
+       u16                     rsvd;
        u32             tv_sec;
 };
 #define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
@@ -358,7 +418,9 @@ struct bfi_ioc_ctrl_req_s {
 struct bfi_ioc_ctrl_reply_s {
        struct bfi_mhdr_s       mh;             /*  Common msg header     */
        u8                      status;         /*  enable/disable status */
-       u8                      rsvd[3];
+       u8                      port_mode;      /*  bfa_mode_s  */
+       u8                      cap_bm;         /*  capability bit mask */
+       u8                      rsvd;
 };
 #define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s;
 #define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
@@ -380,7 +442,7 @@ union bfi_ioc_h2i_msg_u {
  */
 union bfi_ioc_i2h_msg_u {
        struct bfi_mhdr_s               mh;
-       struct bfi_ioc_rdy_event_s      rdy_event;
+       struct bfi_ioc_ctrl_reply_s     fw_event;
        u32                     mboxmsg[BFI_IOC_MSGSZ];
 };
 
@@ -393,6 +455,7 @@ union bfi_ioc_i2h_msg_u {
 
 #define BFI_PBC_MAX_BLUNS      8
 #define BFI_PBC_MAX_VPORTS     16
+#define BFI_PBC_PORT_DISABLED  2
 
 /*
  * PBC boot lun configuration
@@ -574,6 +637,516 @@ union bfi_port_i2h_msg_u {
        struct bfi_port_generic_rsp_s   clearstats_rsp;
 };
 
+/*
+ *----------------------------------------------------------------------
+ *                             ABLK
+ *----------------------------------------------------------------------
+ */
+enum bfi_ablk_h2i_msgs_e {
+       BFI_ABLK_H2I_QUERY              = 1,
+       BFI_ABLK_H2I_ADPT_CONFIG        = 2,
+       BFI_ABLK_H2I_PORT_CONFIG        = 3,
+       BFI_ABLK_H2I_PF_CREATE          = 4,
+       BFI_ABLK_H2I_PF_DELETE          = 5,
+       BFI_ABLK_H2I_PF_UPDATE          = 6,
+       BFI_ABLK_H2I_OPTROM_ENABLE      = 7,
+       BFI_ABLK_H2I_OPTROM_DISABLE     = 8,
+};
+
+enum bfi_ablk_i2h_msgs_e {
+       BFI_ABLK_I2H_QUERY              = BFA_I2HM(BFI_ABLK_H2I_QUERY),
+       BFI_ABLK_I2H_ADPT_CONFIG        = BFA_I2HM(BFI_ABLK_H2I_ADPT_CONFIG),
+       BFI_ABLK_I2H_PORT_CONFIG        = BFA_I2HM(BFI_ABLK_H2I_PORT_CONFIG),
+       BFI_ABLK_I2H_PF_CREATE          = BFA_I2HM(BFI_ABLK_H2I_PF_CREATE),
+       BFI_ABLK_I2H_PF_DELETE          = BFA_I2HM(BFI_ABLK_H2I_PF_DELETE),
+       BFI_ABLK_I2H_PF_UPDATE          = BFA_I2HM(BFI_ABLK_H2I_PF_UPDATE),
+       BFI_ABLK_I2H_OPTROM_ENABLE      = BFA_I2HM(BFI_ABLK_H2I_OPTROM_ENABLE),
+       BFI_ABLK_I2H_OPTROM_DISABLE     = BFA_I2HM(BFI_ABLK_H2I_OPTROM_DISABLE),
+};
+
+/* BFI_ABLK_H2I_QUERY */
+struct bfi_ablk_h2i_query_s {
+       struct bfi_mhdr_s       mh;
+       union bfi_addr_u        addr;
+};
+
+/* BFI_ABLK_H2I_ADPT_CONFIG, BFI_ABLK_H2I_PORT_CONFIG */
+struct bfi_ablk_h2i_cfg_req_s {
+       struct bfi_mhdr_s       mh;
+       u8                      mode;
+       u8                      port;
+       u8                      max_pf;
+       u8                      max_vf;
+};
+
+/*
+ * BFI_ABLK_H2I_PF_CREATE, BFI_ABLK_H2I_PF_DELETE
+ */
+struct bfi_ablk_h2i_pf_req_s {
+       struct bfi_mhdr_s       mh;
+       u8                      pcifn;
+       u8                      port;
+       u16                     pers;
+       u32                     bw;
+};
+
+/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */
+struct bfi_ablk_h2i_optrom_s {
+       struct bfi_mhdr_s       mh;
+};
+
+/*
+ * BFI_ABLK_I2H_QUERY
+ * BFI_ABLK_I2H_PORT_CONFIG
+ * BFI_ABLK_I2H_PF_CREATE
+ * BFI_ABLK_I2H_PF_DELETE
+ * BFI_ABLK_I2H_PF_UPDATE
+ * BFI_ABLK_I2H_OPTROM_ENABLE
+ * BFI_ABLK_I2H_OPTROM_DISABLE
+ */
+struct bfi_ablk_i2h_rsp_s {
+       struct bfi_mhdr_s       mh;
+       u8                      status;
+       u8                      pcifn;
+       u8                      port_mode;
+};
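
A hedged sketch of how an ASIC-block port-configuration request might be populated using the structures above. The mode encoding and limit values here are illustrative only; setting up the message header and queueing the mailbox message are left to the caller.

/* Illustrative only: fill a port-config request for port 0. */
static void example_ablk_port_cfg(struct bfi_ablk_h2i_cfg_req_s *req)
{
        req->mode   = BFI_PORT_MODE_FC; /* desired personality (illustrative) */
        req->port   = 0;                /* port number                        */
        req->max_pf = 1;                /* illustrative PF/VF limits          */
        req->max_vf = 0;
        /*
         * req->mh is set up by the caller (msg class BFI_MC_ABLK,
         * msg id BFI_ABLK_H2I_PORT_CONFIG) before the message is queued;
         * the firmware answers with a bfi_ablk_i2h_rsp_s.
         */
}
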
+
+
+/*
+ *     CEE module specific messages
+ */
+
+/* Mailbox commands from host to firmware */
+enum bfi_cee_h2i_msgs_e {
+       BFI_CEE_H2I_GET_CFG_REQ = 1,
+       BFI_CEE_H2I_RESET_STATS = 2,
+       BFI_CEE_H2I_GET_STATS_REQ = 3,
+};
+
+enum bfi_cee_i2h_msgs_e {
+       BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
+       BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
+       BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
+};
+
+/*
+ * H2I command structure for resetting the stats
+ */
+struct bfi_cee_reset_stats_s {
+       struct bfi_mhdr_s  mh;
+};
+
+/*
+ * Get configuration command from host
+ */
+struct bfi_cee_get_req_s {
+       struct bfi_mhdr_s       mh;
+       union bfi_addr_u        dma_addr;
+};
+
+/*
+ * CEE get-config reply message from firmware
+ */
+struct bfi_cee_get_rsp_s {
+       struct bfi_mhdr_s       mh;
+       u8                      cmd_status;
+       u8                      rsvd[3];
+};
+
+/*
+ * CEE stats reply message from firmware
+ */
+struct bfi_cee_stats_rsp_s {
+       struct bfi_mhdr_s       mh;
+       u8                      cmd_status;
+       u8                      rsvd[3];
+};
+
+/* Mailbox message structures from firmware to host    */
+union bfi_cee_i2h_msg_u {
+       struct bfi_mhdr_s               mh;
+       struct bfi_cee_get_rsp_s        get_rsp;
+       struct bfi_cee_stats_rsp_s      stats_rsp;
+};
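
A minimal sketch of dispatching the CEE firmware-to-host messages through the union above, assuming struct bfi_mhdr_s carries the message id in its msg_id field as elsewhere in this interface.

static void example_cee_isr(union bfi_cee_i2h_msg_u *msg)
{
        switch (msg->mh.msg_id) {
        case BFI_CEE_I2H_GET_CFG_RSP:
                /* msg->get_rsp.cmd_status reports whether the CEE/DCBX
                 * configuration was DMA'ed into the host buffer. */
                break;
        case BFI_CEE_I2H_GET_STATS_RSP:
                /* msg->stats_rsp.cmd_status covers the stats DMA. */
                break;
        case BFI_CEE_I2H_RESET_STATS_RSP:
                /* no payload beyond the common header */
                break;
        default:
                break;
        }
}
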
+
+/*
+ * SFP related
+ */
+
+enum bfi_sfp_h2i_e {
+       BFI_SFP_H2I_SHOW        = 1,
+       BFI_SFP_H2I_SCN         = 2,
+};
+
+enum bfi_sfp_i2h_e {
+       BFI_SFP_I2H_SHOW = BFA_I2HM(BFI_SFP_H2I_SHOW),
+       BFI_SFP_I2H_SCN  = BFA_I2HM(BFI_SFP_H2I_SCN),
+};
+
+/*
+ *     SFP state change notification
+ */
+struct bfi_sfp_scn_s {
+       struct bfi_mhdr_s mhr;  /* host msg header        */
+       u8      event;
+       u8      sfpid;
+       u8      pomlvl; /* pom level: normal/warning/alarm */
+       u8      is_elb; /* e-loopback */
+};
+
+/*
+ *     SFP state
+ */
+enum bfa_sfp_stat_e {
+       BFA_SFP_STATE_INIT      = 0,    /* SFP state is uninit  */
+       BFA_SFP_STATE_REMOVED   = 1,    /* SFP is removed       */
+       BFA_SFP_STATE_INSERTED  = 2,    /* SFP is inserted      */
+       BFA_SFP_STATE_VALID     = 3,    /* SFP is valid         */
+       BFA_SFP_STATE_UNSUPPORT = 4,    /* SFP is unsupported   */
+       BFA_SFP_STATE_FAILED    = 5,    /* SFP i2c read fail    */
+};
+
+/*
+ *  SFP memory access type
+ */
+enum bfi_sfp_mem_e {
+       BFI_SFP_MEM_ALL         = 0x1,  /* access all data field */
+       BFI_SFP_MEM_DIAGEXT     = 0x2,  /* access diag ext data field only */
+};
+
+struct bfi_sfp_req_s {
+       struct bfi_mhdr_s       mh;
+       u8                      memtype;
+       u8                      rsvd[3];
+       struct bfi_alen_s       alen;
+};
+
+struct bfi_sfp_rsp_s {
+       struct bfi_mhdr_s       mh;
+       u8                      status;
+       u8                      state;
+       u8                      rsvd[2];
+};
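
A hedged sketch of an SFP show request built from the layout above. The bfi_alen_s fields describing the DMA buffer are not reproduced in this hunk, so they are left to the caller.

static void example_sfp_show_req(struct bfi_sfp_req_s *req)
{
        req->memtype = BFI_SFP_MEM_ALL; /* fetch the full SFP data field */
        /*
         * req->alen must describe the host DMA buffer the firmware fills;
         * req->mh carries BFI_MC_SFP / BFI_SFP_H2I_SHOW.  The reply
         * (bfi_sfp_rsp_s) returns a status plus a bfa_sfp_stat_e state.
         */
}
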
+
+/*
+ *     FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+       BFI_FLASH_H2I_QUERY_REQ = 1,
+       BFI_FLASH_H2I_ERASE_REQ = 2,
+       BFI_FLASH_H2I_WRITE_REQ = 3,
+       BFI_FLASH_H2I_READ_REQ = 4,
+       BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+       BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+       BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+       BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+       BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+       BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+       BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       struct bfi_alen_s alen;
+};
+
+/*
+ * Flash erase request
+ */
+struct bfi_flash_erase_req_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32     type;   /* partition type */
+       u8      instance; /* partition instance */
+       u8      rsv[3];
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       struct bfi_alen_s alen;
+       u32     type;   /* partition type */
+       u8      instance; /* partition instance */
+       u8      last;
+       u8      rsv[2];
+       u32     offset;
+       u32     length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     offset;
+       u32     length;
+       struct bfi_alen_s alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     type;       /* partition type */
+       u8      instance;   /* partition instance */
+       u8      rsv[3];
+       u32     status;
+       u32     length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     type;       /* partition type */
+       u8      instance;   /* partition instance */
+       u8      rsv[3];
+       u32     status;
+       u32     length;
+};
+
+/*
+ * Flash erase response
+ */
+struct bfi_flash_erase_rsp_s {
+       struct bfi_mhdr_s mh;   /* Common msg header */
+       u32     type;           /* partition type */
+       u8      instance;       /* partition instance */
+       u8      rsv[3];
+       u32     status;
+};
+
+/*
+ * Flash event notification
+ */
+struct bfi_flash_event_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       bfa_status_t            status;
+       u32                     param;
+};
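
Because a flash image is larger than a single DMA buffer, the write request carries an offset/length pair plus a 'last' flag. A hedged sketch of the chunking loop follows; FLASH_CHUNK_SZ is a hypothetical constant, and the DMA copy and mailbox send are elided.

#define FLASH_CHUNK_SZ  (4 * 1024)      /* hypothetical chunk size */

static void example_flash_write(struct bfi_flash_write_req_s *req,
                                u32 part_type, u8 inst, u32 img_len)
{
        u32 off;

        for (off = 0; off < img_len; off += FLASH_CHUNK_SZ) {
                u32 len = img_len - off;

                if (len > FLASH_CHUNK_SZ)
                        len = FLASH_CHUNK_SZ;

                req->type     = part_type;      /* partition type            */
                req->instance = inst;           /* partition instance        */
                req->offset   = off;            /* byte offset in partition  */
                req->length   = len;
                req->last     = (off + len >= img_len);
                /*
                 * Copy 'len' bytes into the DMA buffer described by
                 * req->alen, post the request, and wait for
                 * BFI_FLASH_I2H_WRITE_RSP before sending the next chunk.
                 */
        }
}
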
+
+/*
+ *----------------------------------------------------------------------
+ *                             DIAG
+ *----------------------------------------------------------------------
+ */
+enum bfi_diag_h2i {
+       BFI_DIAG_H2I_PORTBEACON = 1,
+       BFI_DIAG_H2I_LOOPBACK = 2,
+       BFI_DIAG_H2I_FWPING = 3,
+       BFI_DIAG_H2I_TEMPSENSOR = 4,
+       BFI_DIAG_H2I_LEDTEST = 5,
+       BFI_DIAG_H2I_QTEST      = 6,
+};
+
+enum bfi_diag_i2h {
+       BFI_DIAG_I2H_PORTBEACON = BFA_I2HM(BFI_DIAG_H2I_PORTBEACON),
+       BFI_DIAG_I2H_LOOPBACK = BFA_I2HM(BFI_DIAG_H2I_LOOPBACK),
+       BFI_DIAG_I2H_FWPING = BFA_I2HM(BFI_DIAG_H2I_FWPING),
+       BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR),
+       BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
+       BFI_DIAG_I2H_QTEST      = BFA_I2HM(BFI_DIAG_H2I_QTEST),
+};
+
+#define BFI_DIAG_MAX_SGES      2
+#define BFI_DIAG_DMA_BUF_SZ    (2 * 1024)
+#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
+#define BFI_BOOT_MEMTEST_RES_SIG  0xA0A1A2A3
+
+struct bfi_diag_lb_req_s {
+       struct bfi_mhdr_s mh;
+       u32     loopcnt;
+       u32     pattern;
+       u8      lb_mode;        /*!< bfa_port_opmode_t */
+       u8      speed;          /*!< bfa_port_speed_t */
+       u8      rsvd[2];
+};
+
+struct bfi_diag_lb_rsp_s {
+       struct bfi_mhdr_s  mh;          /* 4 bytes */
+       struct bfa_diag_loopback_result_s res; /* 16 bytes */
+};
+
+struct bfi_diag_fwping_req_s {
+       struct bfi_mhdr_s mh;   /* 4 bytes */
+       struct bfi_alen_s alen; /* 12 bytes */
+       u32     data;           /* user input data pattern */
+       u32     count;          /* user input dma count */
+       u8      qtag;           /* track CPE vc */
+       u8      rsv[3];
+};
+
+struct bfi_diag_fwping_rsp_s {
+       struct bfi_mhdr_s  mh;          /* 4 bytes */
+       u32     data;           /* user input data pattern    */
+       u8      qtag;           /* track CPE vc               */
+       u8      dma_status;     /* dma status                 */
+       u8      rsv[2];
+};
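
A hedged sketch of a firmware-ping request: the host asks the firmware to fill a DMA area with a known pattern and later verifies the echoed pattern and DMA status in the response. Any names not shown in this hunk are assumptions.

static void example_fwping_req(struct bfi_diag_fwping_req_s *req,
                               u32 dma_count, u32 pattern, u8 qtag)
{
        req->data  = pattern;   /* pattern firmware should write back   */
        req->count = dma_count; /* how much of the DMA area to fill     */
        req->qtag  = qtag;      /* lets the host match rsp to req       */
        /*
         * req->alen describes the host DMA buffer; on completion the
         * bfi_diag_fwping_rsp_s echoes data/qtag and reports dma_status.
         */
}
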
+
+/*
+ * Temperature Sensor
+ */
+struct bfi_diag_ts_req_s {
+       struct bfi_mhdr_s mh;   /* 4 bytes */
+       u16     temp;           /* 10-bit A/D value */
+       u16     brd_temp;       /* 9-bit board temp */
+       u8      status;
+       u8      ts_junc;        /* show junction tempsensor   */
+       u8      ts_brd;         /* show board tempsensor      */
+       u8      rsv;
+};
+#define bfi_diag_ts_rsp_t struct bfi_diag_ts_req_s
+
+struct bfi_diag_ledtest_req_s {
+       struct bfi_mhdr_s  mh;  /* 4 bytes */
+       u8      cmd;
+       u8      color;
+       u8      portid;
+       u8      led;    /* bitmap of LEDs to be tested */
+       u16     freq;   /* no. of blinks every 10 secs */
+       u8      rsv[2];
+};
+
+/* notify host led operation is done */
+struct bfi_diag_ledtest_rsp_s {
+       struct bfi_mhdr_s  mh;  /* 4 bytes */
+};
+
+struct bfi_diag_portbeacon_req_s {
+       struct bfi_mhdr_s  mh;  /* 4 bytes */
+       u32     period; /* beaconing period */
+       u8      beacon; /* 1: beacon on */
+       u8      rsvd[3];
+};
+
+/* notify host the beacon is off */
+struct bfi_diag_portbeacon_rsp_s {
+       struct bfi_mhdr_s  mh;  /* 4 bytes */
+};
+
+struct bfi_diag_qtest_req_s {
+       struct bfi_mhdr_s       mh;             /* 4 bytes */
+       u32     data[BFI_LMSG_PL_WSZ]; /* fill up tcm prefetch area */
+};
+#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s
+
+/*
+ *     PHY module specific
+ */
+enum bfi_phy_h2i_msgs_e {
+       BFI_PHY_H2I_QUERY_REQ = 1,
+       BFI_PHY_H2I_STATS_REQ = 2,
+       BFI_PHY_H2I_WRITE_REQ = 3,
+       BFI_PHY_H2I_READ_REQ = 4,
+};
+
+enum bfi_phy_i2h_msgs_e {
+       BFI_PHY_I2H_QUERY_RSP = BFA_I2HM(1),
+       BFI_PHY_I2H_STATS_RSP = BFA_I2HM(2),
+       BFI_PHY_I2H_WRITE_RSP = BFA_I2HM(3),
+       BFI_PHY_I2H_READ_RSP = BFA_I2HM(4),
+};
+
+/*
+ * External PHY query request
+ */
+struct bfi_phy_query_req_s {
+       struct bfi_mhdr_s       mh;             /* Common msg header */
+       u8                      instance;
+       u8                      rsv[3];
+       struct bfi_alen_s       alen;
+};
+
+/*
+ * External PHY stats request
+ */
+struct bfi_phy_stats_req_s {
+       struct bfi_mhdr_s       mh;             /* Common msg header */
+       u8                      instance;
+       u8                      rsv[3];
+       struct bfi_alen_s       alen;
+};
+
+/*
+ * External PHY write request
+ */
+struct bfi_phy_write_req_s {
+       struct bfi_mhdr_s       mh;             /* Common msg header */
+       u8              instance;
+       u8              last;
+       u8              rsv[2];
+       u32             offset;
+       u32             length;
+       struct bfi_alen_s       alen;
+};
+
+/*
+ * External PHY read request
+ */
+struct bfi_phy_read_req_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u8              instance;
+       u8              rsv[3];
+       u32             offset;
+       u32             length;
+       struct bfi_alen_s       alen;
+};
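
A hedged sketch of an external-PHY firmware read, assuming the request layout above; as with flash, larger transfers would be split across requests by offset/length.

static void example_phy_read_req(struct bfi_phy_read_req_s *req,
                                 u8 inst, u32 off, u32 len)
{
        req->instance = inst;   /* PHY instance                          */
        req->offset   = off;    /* byte offset into PHY firmware         */
        req->length   = len;    /* bytes to read into req->alen buffer   */
        /*
         * The BFI_PHY_I2H_READ_RSP reply reports a status and the number
         * of bytes actually transferred.
         */
}
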
+
+/*
+ * External PHY query response
+ */
+struct bfi_phy_query_rsp_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32                     status;
+};
+
+/*
+ * External PHY stats response
+ */
+struct bfi_phy_stats_rsp_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32                     status;
+};
+
+/*
+ * External PHY read response
+ */
+struct bfi_phy_read_rsp_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32                     status;
+       u32             length;
+};
+
+/*
+ * External PHY write response
+ */
+struct bfi_phy_write_rsp_s {
+       struct bfi_mhdr_s       mh;     /* Common msg header */
+       u32                     status;
+       u32                     length;
+};
+
 #pragma pack()
 
 #endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h
deleted file mode 100644 (file)
index 39ad42b..0000000
+++ /dev/null
@@ -1,305 +0,0 @@
-
-/*
- * bfi_cbreg.h crossbow host block register definitions
- *
- * !!! Do not edit. Auto generated. !!!
- */
-
-#ifndef __BFI_CBREG_H__
-#define __BFI_CBREG_H__
-
-
-#define HOSTFN0_INT_STATUS               0x00014000
-#define __HOSTFN0_INT_STATUS_LVL_MK      0x00f00000
-#define __HOSTFN0_INT_STATUS_LVL_SH      20
-#define __HOSTFN0_INT_STATUS_LVL(_v)     ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
-#define __HOSTFN0_INT_STATUS_P           0x000fffff
-#define HOSTFN0_INT_MSK                  0x00014004
-#define HOST_PAGE_NUM_FN0                0x00014008
-#define __HOST_PAGE_NUM_FN               0x000001ff
-#define HOSTFN1_INT_STATUS               0x00014100
-#define __HOSTFN1_INT_STAT_LVL_MK        0x00f00000
-#define __HOSTFN1_INT_STAT_LVL_SH        20
-#define __HOSTFN1_INT_STAT_LVL(_v)       ((_v) << __HOSTFN1_INT_STAT_LVL_SH)
-#define __HOSTFN1_INT_STAT_P             0x000fffff
-#define HOSTFN1_INT_MSK                  0x00014104
-#define HOST_PAGE_NUM_FN1                0x00014108
-#define APP_PLL_400_CTL_REG              0x00014204
-#define __P_400_PLL_LOCK                 0x80000000
-#define __APP_PLL_400_SRAM_USE_100MHZ    0x00100000
-#define __APP_PLL_400_RESET_TIMER_MK     0x000e0000
-#define __APP_PLL_400_RESET_TIMER_SH     17
-#define __APP_PLL_400_RESET_TIMER(_v)    ((_v) << __APP_PLL_400_RESET_TIMER_SH)
-#define __APP_PLL_400_LOGIC_SOFT_RESET   0x00010000
-#define __APP_PLL_400_CNTLMT0_1_MK       0x0000c000
-#define __APP_PLL_400_CNTLMT0_1_SH       14
-#define __APP_PLL_400_CNTLMT0_1(_v)      ((_v) << __APP_PLL_400_CNTLMT0_1_SH)
-#define __APP_PLL_400_JITLMT0_1_MK       0x00003000
-#define __APP_PLL_400_JITLMT0_1_SH       12
-#define __APP_PLL_400_JITLMT0_1(_v)      ((_v) << __APP_PLL_400_JITLMT0_1_SH)
-#define __APP_PLL_400_HREF               0x00000800
-#define __APP_PLL_400_HDIV               0x00000400
-#define __APP_PLL_400_P0_1_MK            0x00000300
-#define __APP_PLL_400_P0_1_SH            8
-#define __APP_PLL_400_P0_1(_v)           ((_v) << __APP_PLL_400_P0_1_SH)
-#define __APP_PLL_400_Z0_2_MK            0x000000e0
-#define __APP_PLL_400_Z0_2_SH            5
-#define __APP_PLL_400_Z0_2(_v)           ((_v) << __APP_PLL_400_Z0_2_SH)
-#define __APP_PLL_400_RSEL200500         0x00000010
-#define __APP_PLL_400_ENARST             0x00000008
-#define __APP_PLL_400_BYPASS             0x00000004
-#define __APP_PLL_400_LRESETN            0x00000002
-#define __APP_PLL_400_ENABLE             0x00000001
-#define APP_PLL_212_CTL_REG              0x00014208
-#define __P_212_PLL_LOCK                 0x80000000
-#define __APP_PLL_212_RESET_TIMER_MK     0x000e0000
-#define __APP_PLL_212_RESET_TIMER_SH     17
-#define __APP_PLL_212_RESET_TIMER(_v)    ((_v) << __APP_PLL_212_RESET_TIMER_SH)
-#define __APP_PLL_212_LOGIC_SOFT_RESET   0x00010000
-#define __APP_PLL_212_CNTLMT0_1_MK       0x0000c000
-#define __APP_PLL_212_CNTLMT0_1_SH       14
-#define __APP_PLL_212_CNTLMT0_1(_v)      ((_v) << __APP_PLL_212_CNTLMT0_1_SH)
-#define __APP_PLL_212_JITLMT0_1_MK       0x00003000
-#define __APP_PLL_212_JITLMT0_1_SH       12
-#define __APP_PLL_212_JITLMT0_1(_v)      ((_v) << __APP_PLL_212_JITLMT0_1_SH)
-#define __APP_PLL_212_HREF               0x00000800
-#define __APP_PLL_212_HDIV               0x00000400
-#define __APP_PLL_212_P0_1_MK            0x00000300
-#define __APP_PLL_212_P0_1_SH            8
-#define __APP_PLL_212_P0_1(_v)           ((_v) << __APP_PLL_212_P0_1_SH)
-#define __APP_PLL_212_Z0_2_MK            0x000000e0
-#define __APP_PLL_212_Z0_2_SH            5
-#define __APP_PLL_212_Z0_2(_v)           ((_v) << __APP_PLL_212_Z0_2_SH)
-#define __APP_PLL_212_RSEL200500         0x00000010
-#define __APP_PLL_212_ENARST             0x00000008
-#define __APP_PLL_212_BYPASS             0x00000004
-#define __APP_PLL_212_LRESETN            0x00000002
-#define __APP_PLL_212_ENABLE             0x00000001
-#define HOST_SEM0_REG                    0x00014230
-#define __HOST_SEMAPHORE                 0x00000001
-#define HOST_SEM1_REG                    0x00014234
-#define HOST_SEM2_REG                    0x00014238
-#define HOST_SEM3_REG                    0x0001423c
-#define HOST_SEM0_INFO_REG               0x00014240
-#define HOST_SEM1_INFO_REG               0x00014244
-#define HOST_SEM2_INFO_REG               0x00014248
-#define HOST_SEM3_INFO_REG               0x0001424c
-#define HOSTFN0_LPU0_CMD_STAT            0x00019000
-#define __HOSTFN0_LPU0_MBOX_INFO_MK      0xfffffffe
-#define __HOSTFN0_LPU0_MBOX_INFO_SH      1
-#define __HOSTFN0_LPU0_MBOX_INFO(_v)     ((_v) << __HOSTFN0_LPU0_MBOX_INFO_SH)
-#define __HOSTFN0_LPU0_MBOX_CMD_STATUS   0x00000001
-#define LPU0_HOSTFN0_CMD_STAT            0x00019008
-#define __LPU0_HOSTFN0_MBOX_INFO_MK      0xfffffffe
-#define __LPU0_HOSTFN0_MBOX_INFO_SH      1
-#define __LPU0_HOSTFN0_MBOX_INFO(_v)     ((_v) << __LPU0_HOSTFN0_MBOX_INFO_SH)
-#define __LPU0_HOSTFN0_MBOX_CMD_STATUS   0x00000001
-#define HOSTFN1_LPU1_CMD_STAT            0x00019014
-#define __HOSTFN1_LPU1_MBOX_INFO_MK      0xfffffffe
-#define __HOSTFN1_LPU1_MBOX_INFO_SH      1
-#define __HOSTFN1_LPU1_MBOX_INFO(_v)     ((_v) << __HOSTFN1_LPU1_MBOX_INFO_SH)
-#define __HOSTFN1_LPU1_MBOX_CMD_STATUS   0x00000001
-#define LPU1_HOSTFN1_CMD_STAT            0x0001901c
-#define __LPU1_HOSTFN1_MBOX_INFO_MK      0xfffffffe
-#define __LPU1_HOSTFN1_MBOX_INFO_SH      1
-#define __LPU1_HOSTFN1_MBOX_INFO(_v)     ((_v) << __LPU1_HOSTFN1_MBOX_INFO_SH)
-#define __LPU1_HOSTFN1_MBOX_CMD_STATUS   0x00000001
-#define CPE_Q0_DEPTH                     0x00010014
-#define CPE_Q0_PI                        0x0001001c
-#define CPE_Q0_CI                        0x00010020
-#define CPE_Q1_DEPTH                     0x00010034
-#define CPE_Q1_PI                        0x0001003c
-#define CPE_Q1_CI                        0x00010040
-#define CPE_Q2_DEPTH                     0x00010054
-#define CPE_Q2_PI                        0x0001005c
-#define CPE_Q2_CI                        0x00010060
-#define CPE_Q3_DEPTH                     0x00010074
-#define CPE_Q3_PI                        0x0001007c
-#define CPE_Q3_CI                        0x00010080
-#define CPE_Q4_DEPTH                     0x00010094
-#define CPE_Q4_PI                        0x0001009c
-#define CPE_Q4_CI                        0x000100a0
-#define CPE_Q5_DEPTH                     0x000100b4
-#define CPE_Q5_PI                        0x000100bc
-#define CPE_Q5_CI                        0x000100c0
-#define CPE_Q6_DEPTH                     0x000100d4
-#define CPE_Q6_PI                        0x000100dc
-#define CPE_Q6_CI                        0x000100e0
-#define CPE_Q7_DEPTH                     0x000100f4
-#define CPE_Q7_PI                        0x000100fc
-#define CPE_Q7_CI                        0x00010100
-#define RME_Q0_DEPTH                     0x00011014
-#define RME_Q0_PI                        0x0001101c
-#define RME_Q0_CI                        0x00011020
-#define RME_Q1_DEPTH                     0x00011034
-#define RME_Q1_PI                        0x0001103c
-#define RME_Q1_CI                        0x00011040
-#define RME_Q2_DEPTH                     0x00011054
-#define RME_Q2_PI                        0x0001105c
-#define RME_Q2_CI                        0x00011060
-#define RME_Q3_DEPTH                     0x00011074
-#define RME_Q3_PI                        0x0001107c
-#define RME_Q3_CI                        0x00011080
-#define RME_Q4_DEPTH                     0x00011094
-#define RME_Q4_PI                        0x0001109c
-#define RME_Q4_CI                        0x000110a0
-#define RME_Q5_DEPTH                     0x000110b4
-#define RME_Q5_PI                        0x000110bc
-#define RME_Q5_CI                        0x000110c0
-#define RME_Q6_DEPTH                     0x000110d4
-#define RME_Q6_PI                        0x000110dc
-#define RME_Q6_CI                        0x000110e0
-#define RME_Q7_DEPTH                     0x000110f4
-#define RME_Q7_PI                        0x000110fc
-#define RME_Q7_CI                        0x00011100
-#define PSS_CTL_REG                      0x00018800
-#define __PSS_I2C_CLK_DIV_MK             0x00030000
-#define __PSS_I2C_CLK_DIV_SH             16
-#define __PSS_I2C_CLK_DIV(_v)            ((_v) << __PSS_I2C_CLK_DIV_SH)
-#define __PSS_LMEM_INIT_DONE             0x00001000
-#define __PSS_LMEM_RESET                 0x00000200
-#define __PSS_LMEM_INIT_EN               0x00000100
-#define __PSS_LPU1_RESET                 0x00000002
-#define __PSS_LPU0_RESET                 0x00000001
-#define PSS_ERR_STATUS_REG               0x00018810
-#define __PSS_LMEM1_CORR_ERR             0x00000800
-#define __PSS_LMEM0_CORR_ERR             0x00000400
-#define __PSS_LMEM1_UNCORR_ERR           0x00000200
-#define __PSS_LMEM0_UNCORR_ERR           0x00000100
-#define __PSS_BAL_PERR                   0x00000080
-#define __PSS_DIP_IF_ERR                 0x00000040
-#define __PSS_IOH_IF_ERR                 0x00000020
-#define __PSS_TDS_IF_ERR                 0x00000010
-#define __PSS_RDS_IF_ERR                 0x00000008
-#define __PSS_SGM_IF_ERR                 0x00000004
-#define __PSS_LPU1_RAM_ERR               0x00000002
-#define __PSS_LPU0_RAM_ERR               0x00000001
-#define ERR_SET_REG                      0x00018818
-#define __PSS_ERR_STATUS_SET             0x00000fff
-
-
-/*
- * These definitions are either in error/missing in spec. Its auto-generated
- * from hard coded values in regparse.pl.
- */
-#define __EMPHPOST_AT_4G_MK_FIX          0x0000001c
-#define __EMPHPOST_AT_4G_SH_FIX          0x00000002
-#define __EMPHPRE_AT_4G_FIX              0x00000003
-#define __SFP_TXRATE_EN_FIX              0x00000100
-#define __SFP_RXRATE_EN_FIX              0x00000080
-
-
-/*
- * These register definitions are auto-generated from hard coded values
- * in regparse.pl.
- */
-#define HOSTFN0_LPU_MBOX0_0              0x00019200
-#define HOSTFN1_LPU_MBOX0_8              0x00019260
-#define LPU_HOSTFN0_MBOX0_0              0x00019280
-#define LPU_HOSTFN1_MBOX0_8              0x000192e0
-
-
-/*
- * These register mapping definitions are auto-generated from mapping tables
- * in regparse.pl.
- */
-#define BFA_IOC0_HBEAT_REG               HOST_SEM0_INFO_REG
-#define BFA_IOC0_STATE_REG               HOST_SEM1_INFO_REG
-#define BFA_IOC1_HBEAT_REG               HOST_SEM2_INFO_REG
-#define BFA_IOC1_STATE_REG               HOST_SEM3_INFO_REG
-#define BFA_FW_USE_COUNT                 HOST_SEM4_INFO_REG
-#define BFA_IOC_FAIL_SYNC               HOST_SEM5_INFO_REG
-
-#define CPE_Q_DEPTH(__n) \
-       (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))
-#define CPE_Q_PI(__n) \
-       (CPE_Q0_PI + (__n) * (CPE_Q1_PI - CPE_Q0_PI))
-#define CPE_Q_CI(__n) \
-       (CPE_Q0_CI + (__n) * (CPE_Q1_CI - CPE_Q0_CI))
-#define RME_Q_DEPTH(__n) \
-       (RME_Q0_DEPTH + (__n) * (RME_Q1_DEPTH - RME_Q0_DEPTH))
-#define RME_Q_PI(__n) \
-       (RME_Q0_PI + (__n) * (RME_Q1_PI - RME_Q0_PI))
-#define RME_Q_CI(__n) \
-       (RME_Q0_CI + (__n) * (RME_Q1_CI - RME_Q0_CI))
-
-#define CPE_Q_NUM(__fn, __q)  (((__fn) << 2) + (__q))
-#define RME_Q_NUM(__fn, __q)  (((__fn) << 2) + (__q))
-#define CPE_Q_MASK(__q)  ((__q) & 0x3)
-#define RME_Q_MASK(__q)  ((__q) & 0x3)
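
The crossbow register header being deleted here derived per-queue register addresses by stride from the Q0/Q1 definitions. A small, purely illustrative sketch of that addressing pattern; the bar0 mapping is a placeholder and readl() is the usual kernel MMIO read.

/* Illustrative: read the producer index of CPE queue 'q' (0..7). */
static u32 example_cpe_q_pi(void __iomem *bar0, int q)
{
        /*
         * CPE_Q_PI(q) expands to CPE_Q0_PI + q * (CPE_Q1_PI - CPE_Q0_PI),
         * i.e. 0x0001001c, 0x0001003c, 0x0001005c, ...
         */
        return readl(bar0 + CPE_Q_PI(q));
}
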
-
-
-/*
- * PCI MSI-X vector defines
- */
-enum {
-    BFA_MSIX_CPE_Q0 = 0,
-    BFA_MSIX_CPE_Q1 = 1,
-    BFA_MSIX_CPE_Q2 = 2,
-    BFA_MSIX_CPE_Q3 = 3,
-    BFA_MSIX_CPE_Q4 = 4,
-    BFA_MSIX_CPE_Q5 = 5,
-    BFA_MSIX_CPE_Q6 = 6,
-    BFA_MSIX_CPE_Q7 = 7,
-    BFA_MSIX_RME_Q0 = 8,
-    BFA_MSIX_RME_Q1 = 9,
-    BFA_MSIX_RME_Q2 = 10,
-    BFA_MSIX_RME_Q3 = 11,
-    BFA_MSIX_RME_Q4 = 12,
-    BFA_MSIX_RME_Q5 = 13,
-    BFA_MSIX_RME_Q6 = 14,
-    BFA_MSIX_RME_Q7 = 15,
-    BFA_MSIX_ERR_EMC = 16,
-    BFA_MSIX_ERR_LPU0 = 17,
-    BFA_MSIX_ERR_LPU1 = 18,
-    BFA_MSIX_ERR_PSS = 19,
-    BFA_MSIX_MBOX_LPU0 = 20,
-    BFA_MSIX_MBOX_LPU1 = 21,
-    BFA_MSIX_CB_MAX = 22,
-};
-
-/*
- * And corresponding host interrupt status bit field defines
- */
-#define __HFN_INT_CPE_Q0                   0x00000001U
-#define __HFN_INT_CPE_Q1                   0x00000002U
-#define __HFN_INT_CPE_Q2                   0x00000004U
-#define __HFN_INT_CPE_Q3                   0x00000008U
-#define __HFN_INT_CPE_Q4                   0x00000010U
-#define __HFN_INT_CPE_Q5                   0x00000020U
-#define __HFN_INT_CPE_Q6                   0x00000040U
-#define __HFN_INT_CPE_Q7                   0x00000080U
-#define __HFN_INT_RME_Q0                   0x00000100U
-#define __HFN_INT_RME_Q1                   0x00000200U
-#define __HFN_INT_RME_Q2                   0x00000400U
-#define __HFN_INT_RME_Q3                   0x00000800U
-#define __HFN_INT_RME_Q4                   0x00001000U
-#define __HFN_INT_RME_Q5                   0x00002000U
-#define __HFN_INT_RME_Q6                   0x00004000U
-#define __HFN_INT_RME_Q7                   0x00008000U
-#define __HFN_INT_ERR_EMC                  0x00010000U
-#define __HFN_INT_ERR_LPU0                 0x00020000U
-#define __HFN_INT_ERR_LPU1                 0x00040000U
-#define __HFN_INT_ERR_PSS                  0x00080000U
-#define __HFN_INT_MBOX_LPU0                0x00100000U
-#define __HFN_INT_MBOX_LPU1                0x00200000U
-#define __HFN_INT_MBOX1_LPU0               0x00400000U
-#define __HFN_INT_MBOX1_LPU1               0x00800000U
-#define __HFN_INT_CPE_MASK                 0x000000ffU
-#define __HFN_INT_RME_MASK                 0x0000ff00U
-
-
-/*
- * crossbow memory map.
- */
-#define PSS_SMEM_PAGE_START    0x8000
-#define PSS_SMEM_PGNUM(_pg0, _ma)      ((_pg0) + ((_ma) >> 15))
-#define PSS_SMEM_PGOFF(_ma)    ((_ma) & 0x7fff)
-
-/*
- * End of crossbow memory map
- */
-
-
-#endif /* __BFI_CBREG_H__ */
-
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h
deleted file mode 100644 (file)
index fc4ce4a..0000000
+++ /dev/null
@@ -1,636 +0,0 @@
-
-/*
- * bfi_ctreg.h catapult host block register definitions
- *
- * !!! Do not edit. Auto generated. !!!
- */
-
-#ifndef __BFI_CTREG_H__
-#define __BFI_CTREG_H__
-
-
-#define HOSTFN0_LPU_MBOX0_0            0x00019200
-#define HOSTFN1_LPU_MBOX0_8            0x00019260
-#define LPU_HOSTFN0_MBOX0_0            0x00019280
-#define LPU_HOSTFN1_MBOX0_8            0x000192e0
-#define HOSTFN2_LPU_MBOX0_0            0x00019400
-#define HOSTFN3_LPU_MBOX0_8            0x00019460
-#define LPU_HOSTFN2_MBOX0_0            0x00019480
-#define LPU_HOSTFN3_MBOX0_8            0x000194e0
-#define HOSTFN0_INT_STATUS             0x00014000
-#define __HOSTFN0_HALT_OCCURRED                0x01000000
-#define __HOSTFN0_INT_STATUS_LVL_MK    0x00f00000
-#define __HOSTFN0_INT_STATUS_LVL_SH    20
-#define __HOSTFN0_INT_STATUS_LVL(_v)   ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
-#define __HOSTFN0_INT_STATUS_P_MK      0x000f0000
-#define __HOSTFN0_INT_STATUS_P_SH      16
-#define __HOSTFN0_INT_STATUS_P(_v)     ((_v) << __HOSTFN0_INT_STATUS_P_SH)
-#define __HOSTFN0_INT_STATUS_F         0x0000ffff
-#define HOSTFN0_INT_MSK                        0x00014004
-#define HOST_PAGE_NUM_FN0              0x00014008
-#define __HOST_PAGE_NUM_FN             0x000001ff
-#define HOST_MSIX_ERR_INDEX_FN0                0x0001400c
-#define __MSIX_ERR_INDEX_FN            0x000001ff
-#define HOSTFN1_INT_STATUS             0x00014100
-#define __HOSTFN1_HALT_OCCURRED                0x01000000
-#define __HOSTFN1_INT_STATUS_LVL_MK    0x00f00000
-#define __HOSTFN1_INT_STATUS_LVL_SH    20
-#define __HOSTFN1_INT_STATUS_LVL(_v)   ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
-#define __HOSTFN1_INT_STATUS_P_MK      0x000f0000
-#define __HOSTFN1_INT_STATUS_P_SH      16
-#define __HOSTFN1_INT_STATUS_P(_v)     ((_v) << __HOSTFN1_INT_STATUS_P_SH)
-#define __HOSTFN1_INT_STATUS_F         0x0000ffff
-#define HOSTFN1_INT_MSK                        0x00014104
-#define HOST_PAGE_NUM_FN1              0x00014108
-#define HOST_MSIX_ERR_INDEX_FN1                0x0001410c
-#define APP_PLL_425_CTL_REG            0x00014204
-#define __P_425_PLL_LOCK               0x80000000
-#define __APP_PLL_425_SRAM_USE_100MHZ  0x00100000
-#define __APP_PLL_425_RESET_TIMER_MK   0x000e0000
-#define __APP_PLL_425_RESET_TIMER_SH   17
-#define __APP_PLL_425_RESET_TIMER(_v)  ((_v) << __APP_PLL_425_RESET_TIMER_SH)
-#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_425_CNTLMT0_1_MK     0x0000c000
-#define __APP_PLL_425_CNTLMT0_1_SH     14
-#define __APP_PLL_425_CNTLMT0_1(_v)    ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
-#define __APP_PLL_425_JITLMT0_1_MK     0x00003000
-#define __APP_PLL_425_JITLMT0_1_SH     12
-#define __APP_PLL_425_JITLMT0_1(_v)    ((_v) << __APP_PLL_425_JITLMT0_1_SH)
-#define __APP_PLL_425_HREF             0x00000800
-#define __APP_PLL_425_HDIV             0x00000400
-#define __APP_PLL_425_P0_1_MK          0x00000300
-#define __APP_PLL_425_P0_1_SH          8
-#define __APP_PLL_425_P0_1(_v)         ((_v) << __APP_PLL_425_P0_1_SH)
-#define __APP_PLL_425_Z0_2_MK          0x000000e0
-#define __APP_PLL_425_Z0_2_SH          5
-#define __APP_PLL_425_Z0_2(_v)         ((_v) << __APP_PLL_425_Z0_2_SH)
-#define __APP_PLL_425_RSEL200500       0x00000010
-#define __APP_PLL_425_ENARST           0x00000008
-#define __APP_PLL_425_BYPASS           0x00000004
-#define __APP_PLL_425_LRESETN          0x00000002
-#define __APP_PLL_425_ENABLE           0x00000001
-#define APP_PLL_312_CTL_REG            0x00014208
-#define __P_312_PLL_LOCK               0x80000000
-#define __ENABLE_MAC_AHB_1             0x00800000
-#define __ENABLE_MAC_AHB_0             0x00400000
-#define __ENABLE_MAC_1                 0x00200000
-#define __ENABLE_MAC_0                 0x00100000
-#define __APP_PLL_312_RESET_TIMER_MK   0x000e0000
-#define __APP_PLL_312_RESET_TIMER_SH   17
-#define __APP_PLL_312_RESET_TIMER(_v)  ((_v) << __APP_PLL_312_RESET_TIMER_SH)
-#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_312_CNTLMT0_1_MK     0x0000c000
-#define __APP_PLL_312_CNTLMT0_1_SH     14
-#define __APP_PLL_312_CNTLMT0_1(_v)    ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
-#define __APP_PLL_312_JITLMT0_1_MK     0x00003000
-#define __APP_PLL_312_JITLMT0_1_SH     12
-#define __APP_PLL_312_JITLMT0_1(_v)    ((_v) << __APP_PLL_312_JITLMT0_1_SH)
-#define __APP_PLL_312_HREF             0x00000800
-#define __APP_PLL_312_HDIV             0x00000400
-#define __APP_PLL_312_P0_1_MK          0x00000300
-#define __APP_PLL_312_P0_1_SH          8
-#define __APP_PLL_312_P0_1(_v)         ((_v) << __APP_PLL_312_P0_1_SH)
-#define __APP_PLL_312_Z0_2_MK          0x000000e0
-#define __APP_PLL_312_Z0_2_SH          5
-#define __APP_PLL_312_Z0_2(_v)         ((_v) << __APP_PLL_312_Z0_2_SH)
-#define __APP_PLL_312_RSEL200500       0x00000010
-#define __APP_PLL_312_ENARST           0x00000008
-#define __APP_PLL_312_BYPASS           0x00000004
-#define __APP_PLL_312_LRESETN          0x00000002
-#define __APP_PLL_312_ENABLE           0x00000001
-#define MBIST_CTL_REG                  0x00014220
-#define __EDRAM_BISTR_START            0x00000004
-#define __MBIST_RESET                  0x00000002
-#define __MBIST_START                  0x00000001
-#define MBIST_STAT_REG                 0x00014224
-#define __EDRAM_BISTR_STATUS           0x00000008
-#define __EDRAM_BISTR_DONE             0x00000004
-#define __MEM_BIT_STATUS               0x00000002
-#define __MBIST_DONE                   0x00000001
-#define HOST_SEM0_REG                  0x00014230
-#define __HOST_SEMAPHORE               0x00000001
-#define HOST_SEM1_REG                  0x00014234
-#define HOST_SEM2_REG                  0x00014238
-#define HOST_SEM3_REG                  0x0001423c
-#define HOST_SEM0_INFO_REG             0x00014240
-#define HOST_SEM1_INFO_REG             0x00014244
-#define HOST_SEM2_INFO_REG             0x00014248
-#define HOST_SEM3_INFO_REG             0x0001424c
-#define ETH_MAC_SER_REG                        0x00014288
-#define __APP_EMS_CKBUFAMPIN           0x00000020
-#define __APP_EMS_REFCLKSEL            0x00000010
-#define __APP_EMS_CMLCKSEL             0x00000008
-#define __APP_EMS_REFCKBUFEN2          0x00000004
-#define __APP_EMS_REFCKBUFEN1          0x00000002
-#define __APP_EMS_CHANNEL_SEL          0x00000001
-#define HOSTFN2_INT_STATUS             0x00014300
-#define __HOSTFN2_HALT_OCCURRED                0x01000000
-#define __HOSTFN2_INT_STATUS_LVL_MK    0x00f00000
-#define __HOSTFN2_INT_STATUS_LVL_SH    20
-#define __HOSTFN2_INT_STATUS_LVL(_v)   ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
-#define __HOSTFN2_INT_STATUS_P_MK      0x000f0000
-#define __HOSTFN2_INT_STATUS_P_SH      16
-#define __HOSTFN2_INT_STATUS_P(_v)     ((_v) << __HOSTFN2_INT_STATUS_P_SH)
-#define __HOSTFN2_INT_STATUS_F         0x0000ffff
-#define HOSTFN2_INT_MSK                        0x00014304
-#define HOST_PAGE_NUM_FN2              0x00014308
-#define HOST_MSIX_ERR_INDEX_FN2                0x0001430c
-#define HOSTFN3_INT_STATUS             0x00014400
-#define __HALT_OCCURRED                        0x01000000
-#define __HOSTFN3_INT_STATUS_LVL_MK    0x00f00000
-#define __HOSTFN3_INT_STATUS_LVL_SH    20
-#define __HOSTFN3_INT_STATUS_LVL(_v)   ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
-#define __HOSTFN3_INT_STATUS_P_MK      0x000f0000
-#define __HOSTFN3_INT_STATUS_P_SH      16
-#define __HOSTFN3_INT_STATUS_P(_v)     ((_v) << __HOSTFN3_INT_STATUS_P_SH)
-#define __HOSTFN3_INT_STATUS_F         0x0000ffff
-#define HOSTFN3_INT_MSK                        0x00014404
-#define HOST_PAGE_NUM_FN3              0x00014408
-#define HOST_MSIX_ERR_INDEX_FN3                0x0001440c
-#define FNC_ID_REG                     0x00014600
-#define __FUNCTION_NUMBER              0x00000007
-#define FNC_PERS_REG                   0x00014604
-#define __F3_FUNCTION_ACTIVE           0x80000000
-#define __F3_FUNCTION_MODE             0x40000000
-#define __F3_PORT_MAP_MK               0x30000000
-#define __F3_PORT_MAP_SH               28
-#define __F3_PORT_MAP(_v)              ((_v) << __F3_PORT_MAP_SH)
-#define __F3_VM_MODE                   0x08000000
-#define __F3_INTX_STATUS_MK            0x07000000
-#define __F3_INTX_STATUS_SH            24
-#define __F3_INTX_STATUS(_v)           ((_v) << __F3_INTX_STATUS_SH)
-#define __F2_FUNCTION_ACTIVE           0x00800000
-#define __F2_FUNCTION_MODE             0x00400000
-#define __F2_PORT_MAP_MK               0x00300000
-#define __F2_PORT_MAP_SH               20
-#define __F2_PORT_MAP(_v)              ((_v) << __F2_PORT_MAP_SH)
-#define __F2_VM_MODE                   0x00080000
-#define __F2_INTX_STATUS_MK            0x00070000
-#define __F2_INTX_STATUS_SH            16
-#define __F2_INTX_STATUS(_v)           ((_v) << __F2_INTX_STATUS_SH)
-#define __F1_FUNCTION_ACTIVE           0x00008000
-#define __F1_FUNCTION_MODE             0x00004000
-#define __F1_PORT_MAP_MK               0x00003000
-#define __F1_PORT_MAP_SH               12
-#define __F1_PORT_MAP(_v)              ((_v) << __F1_PORT_MAP_SH)
-#define __F1_VM_MODE                   0x00000800
-#define __F1_INTX_STATUS_MK            0x00000700
-#define __F1_INTX_STATUS_SH            8
-#define __F1_INTX_STATUS(_v)           ((_v) << __F1_INTX_STATUS_SH)
-#define __F0_FUNCTION_ACTIVE           0x00000080
-#define __F0_FUNCTION_MODE             0x00000040
-#define __F0_PORT_MAP_MK               0x00000030
-#define __F0_PORT_MAP_SH               4
-#define __F0_PORT_MAP(_v)              ((_v) << __F0_PORT_MAP_SH)
-#define __F0_VM_MODE           0x00000008
-#define __F0_INTX_STATUS               0x00000007
-enum {
-       __F0_INTX_STATUS_MSIX           = 0x0,
-       __F0_INTX_STATUS_INTA           = 0x1,
-       __F0_INTX_STATUS_INTB           = 0x2,
-       __F0_INTX_STATUS_INTC           = 0x3,
-       __F0_INTX_STATUS_INTD           = 0x4,
-};
-#define OP_MODE                                0x0001460c
-#define __APP_ETH_CLK_LOWSPEED         0x00000004
-#define __GLOBAL_CORECLK_HALFSPEED     0x00000002
-#define __GLOBAL_FCOE_MODE             0x00000001
-#define HOST_SEM4_REG                  0x00014610
-#define HOST_SEM5_REG                  0x00014614
-#define HOST_SEM6_REG                  0x00014618
-#define HOST_SEM7_REG                  0x0001461c
-#define HOST_SEM4_INFO_REG             0x00014620
-#define HOST_SEM5_INFO_REG             0x00014624
-#define HOST_SEM6_INFO_REG             0x00014628
-#define HOST_SEM7_INFO_REG             0x0001462c
-#define HOSTFN0_LPU0_MBOX0_CMD_STAT    0x00019000
-#define __HOSTFN0_LPU0_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN0_LPU0_MBOX0_INFO_SH   1
-#define __HOSTFN0_LPU0_MBOX0_INFO(_v)  ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN0_LPU1_MBOX0_CMD_STAT    0x00019004
-#define __HOSTFN0_LPU1_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN0_LPU1_MBOX0_INFO_SH   1
-#define __HOSTFN0_LPU1_MBOX0_INFO(_v)  ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN0_MBOX0_CMD_STAT    0x00019008
-#define __LPU0_HOSTFN0_MBOX0_INFO_MK   0xfffffffe
-#define __LPU0_HOSTFN0_MBOX0_INFO_SH   1
-#define __LPU0_HOSTFN0_MBOX0_INFO(_v)  ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN0_MBOX0_CMD_STAT    0x0001900c
-#define __LPU1_HOSTFN0_MBOX0_INFO_MK   0xfffffffe
-#define __LPU1_HOSTFN0_MBOX0_INFO_SH   1
-#define __LPU1_HOSTFN0_MBOX0_INFO(_v)  ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN1_LPU0_MBOX0_CMD_STAT    0x00019010
-#define __HOSTFN1_LPU0_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN1_LPU0_MBOX0_INFO_SH   1
-#define __HOSTFN1_LPU0_MBOX0_INFO(_v)  ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN1_LPU1_MBOX0_CMD_STAT    0x00019014
-#define __HOSTFN1_LPU1_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN1_LPU1_MBOX0_INFO_SH   1
-#define __HOSTFN1_LPU1_MBOX0_INFO(_v)  ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN1_MBOX0_CMD_STAT    0x00019018
-#define __LPU0_HOSTFN1_MBOX0_INFO_MK   0xfffffffe
-#define __LPU0_HOSTFN1_MBOX0_INFO_SH   1
-#define __LPU0_HOSTFN1_MBOX0_INFO(_v)  ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN1_MBOX0_CMD_STAT    0x0001901c
-#define __LPU1_HOSTFN1_MBOX0_INFO_MK   0xfffffffe
-#define __LPU1_HOSTFN1_MBOX0_INFO_SH   1
-#define __LPU1_HOSTFN1_MBOX0_INFO(_v)  ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN2_LPU0_MBOX0_CMD_STAT    0x00019150
-#define __HOSTFN2_LPU0_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN2_LPU0_MBOX0_INFO_SH   1
-#define __HOSTFN2_LPU0_MBOX0_INFO(_v)  ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN2_LPU1_MBOX0_CMD_STAT    0x00019154
-#define __HOSTFN2_LPU1_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN2_LPU1_MBOX0_INFO_SH   1
-#define __HOSTFN2_LPU1_MBOX0_INFO(_v)  ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN2_MBOX0_CMD_STAT    0x00019158
-#define __LPU0_HOSTFN2_MBOX0_INFO_MK   0xfffffffe
-#define __LPU0_HOSTFN2_MBOX0_INFO_SH   1
-#define __LPU0_HOSTFN2_MBOX0_INFO(_v)  ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN2_MBOX0_CMD_STAT    0x0001915c
-#define __LPU1_HOSTFN2_MBOX0_INFO_MK   0xfffffffe
-#define __LPU1_HOSTFN2_MBOX0_INFO_SH   1
-#define __LPU1_HOSTFN2_MBOX0_INFO(_v)  ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN3_LPU0_MBOX0_CMD_STAT    0x00019160
-#define __HOSTFN3_LPU0_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN3_LPU0_MBOX0_INFO_SH   1
-#define __HOSTFN3_LPU0_MBOX0_INFO(_v)  ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN3_LPU1_MBOX0_CMD_STAT    0x00019164
-#define __HOSTFN3_LPU1_MBOX0_INFO_MK   0xfffffffe
-#define __HOSTFN3_LPU1_MBOX0_INFO_SH   1
-#define __HOSTFN3_LPU1_MBOX0_INFO(_v)  ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN3_MBOX0_CMD_STAT    0x00019168
-#define __LPU0_HOSTFN3_MBOX0_INFO_MK   0xfffffffe
-#define __LPU0_HOSTFN3_MBOX0_INFO_SH   1
-#define __LPU0_HOSTFN3_MBOX0_INFO(_v)  ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN3_MBOX0_CMD_STAT    0x0001916c
-#define __LPU1_HOSTFN3_MBOX0_INFO_MK   0xfffffffe
-#define __LPU1_HOSTFN3_MBOX0_INFO_SH   1
-#define __LPU1_HOSTFN3_MBOX0_INFO(_v)  ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS        0x00000001
-#define FW_INIT_HALT_P0                        0x000191ac
-#define __FW_INIT_HALT_P               0x00000001
-#define FW_INIT_HALT_P1                        0x000191bc
-#define CPE_PI_PTR_Q0                  0x00038000
-#define __CPE_PI_UNUSED_MK             0xffff0000
-#define __CPE_PI_UNUSED_SH             16
-#define __CPE_PI_UNUSED(_v)            ((_v) << __CPE_PI_UNUSED_SH)
-#define __CPE_PI_PTR                   0x0000ffff
-#define CPE_PI_PTR_Q1                  0x00038040
-#define CPE_CI_PTR_Q0                  0x00038004
-#define __CPE_CI_UNUSED_MK             0xffff0000
-#define __CPE_CI_UNUSED_SH             16
-#define __CPE_CI_UNUSED(_v)            ((_v) << __CPE_CI_UNUSED_SH)
-#define __CPE_CI_PTR                   0x0000ffff
-#define CPE_CI_PTR_Q1                  0x00038044
-#define CPE_DEPTH_Q0                   0x00038008
-#define __CPE_DEPTH_UNUSED_MK          0xf8000000
-#define __CPE_DEPTH_UNUSED_SH          27
-#define __CPE_DEPTH_UNUSED(_v)         ((_v) << __CPE_DEPTH_UNUSED_SH)
-#define __CPE_MSIX_VEC_INDEX_MK                0x07ff0000
-#define __CPE_MSIX_VEC_INDEX_SH                16
-#define __CPE_MSIX_VEC_INDEX(_v)       ((_v) << __CPE_MSIX_VEC_INDEX_SH)
-#define __CPE_DEPTH                    0x0000ffff
-#define CPE_DEPTH_Q1                   0x00038048
-#define CPE_QCTRL_Q0                   0x0003800c
-#define __CPE_CTRL_UNUSED30_MK         0xfc000000
-#define __CPE_CTRL_UNUSED30_SH         26
-#define __CPE_CTRL_UNUSED30(_v)                ((_v) << __CPE_CTRL_UNUSED30_SH)
-#define __CPE_FUNC_INT_CTRL_MK         0x03000000
-#define __CPE_FUNC_INT_CTRL_SH         24
-#define __CPE_FUNC_INT_CTRL(_v)                ((_v) << __CPE_FUNC_INT_CTRL_SH)
-enum {
-       __CPE_FUNC_INT_CTRL_DISABLE             = 0x0,
-       __CPE_FUNC_INT_CTRL_F2NF                = 0x1,
-       __CPE_FUNC_INT_CTRL_3QUART              = 0x2,
-       __CPE_FUNC_INT_CTRL_HALF                = 0x3,
-};
-#define __CPE_CTRL_UNUSED20_MK         0x00f00000
-#define __CPE_CTRL_UNUSED20_SH         20
-#define __CPE_CTRL_UNUSED20(_v)                ((_v) << __CPE_CTRL_UNUSED20_SH)
-#define __CPE_SCI_TH_MK                        0x000f0000
-#define __CPE_SCI_TH_SH                        16
-#define __CPE_SCI_TH(_v)               ((_v) << __CPE_SCI_TH_SH)
-#define __CPE_CTRL_UNUSED10_MK         0x0000c000
-#define __CPE_CTRL_UNUSED10_SH         14
-#define __CPE_CTRL_UNUSED10(_v)                ((_v) << __CPE_CTRL_UNUSED10_SH)
-#define __CPE_ACK_PENDING              0x00002000
-#define __CPE_CTRL_UNUSED40_MK         0x00001c00
-#define __CPE_CTRL_UNUSED40_SH         10
-#define __CPE_CTRL_UNUSED40(_v)                ((_v) << __CPE_CTRL_UNUSED40_SH)
-#define __CPE_PCIEID_MK                        0x00000300
-#define __CPE_PCIEID_SH                        8
-#define __CPE_PCIEID(_v)               ((_v) << __CPE_PCIEID_SH)
-#define __CPE_CTRL_UNUSED00_MK         0x000000fe
-#define __CPE_CTRL_UNUSED00_SH         1
-#define __CPE_CTRL_UNUSED00(_v)                ((_v) << __CPE_CTRL_UNUSED00_SH)
-#define __CPE_ESIZE                    0x00000001
-#define CPE_QCTRL_Q1                   0x0003804c
-#define __CPE_CTRL_UNUSED31_MK         0xfc000000
-#define __CPE_CTRL_UNUSED31_SH         26
-#define __CPE_CTRL_UNUSED31(_v)                ((_v) << __CPE_CTRL_UNUSED31_SH)
-#define __CPE_CTRL_UNUSED21_MK         0x00f00000
-#define __CPE_CTRL_UNUSED21_SH         20
-#define __CPE_CTRL_UNUSED21(_v)                ((_v) << __CPE_CTRL_UNUSED21_SH)
-#define __CPE_CTRL_UNUSED11_MK         0x0000c000
-#define __CPE_CTRL_UNUSED11_SH         14
-#define __CPE_CTRL_UNUSED11(_v)                ((_v) << __CPE_CTRL_UNUSED11_SH)
-#define __CPE_CTRL_UNUSED41_MK         0x00001c00
-#define __CPE_CTRL_UNUSED41_SH         10
-#define __CPE_CTRL_UNUSED41(_v)                ((_v) << __CPE_CTRL_UNUSED41_SH)
-#define __CPE_CTRL_UNUSED01_MK         0x000000fe
-#define __CPE_CTRL_UNUSED01_SH         1
-#define __CPE_CTRL_UNUSED01(_v)                ((_v) << __CPE_CTRL_UNUSED01_SH)
-#define RME_PI_PTR_Q0                  0x00038020
-#define __LATENCY_TIME_STAMP_MK                0xffff0000
-#define __LATENCY_TIME_STAMP_SH                16
-#define __LATENCY_TIME_STAMP(_v)       ((_v) << __LATENCY_TIME_STAMP_SH)
-#define __RME_PI_PTR                   0x0000ffff
-#define RME_PI_PTR_Q1                  0x00038060
-#define RME_CI_PTR_Q0                  0x00038024
-#define __DELAY_TIME_STAMP_MK          0xffff0000
-#define __DELAY_TIME_STAMP_SH          16
-#define __DELAY_TIME_STAMP(_v)         ((_v) << __DELAY_TIME_STAMP_SH)
-#define __RME_CI_PTR                   0x0000ffff
-#define RME_CI_PTR_Q1                  0x00038064
-#define RME_DEPTH_Q0                   0x00038028
-#define __RME_DEPTH_UNUSED_MK          0xf8000000
-#define __RME_DEPTH_UNUSED_SH          27
-#define __RME_DEPTH_UNUSED(_v)         ((_v) << __RME_DEPTH_UNUSED_SH)
-#define __RME_MSIX_VEC_INDEX_MK                0x07ff0000
-#define __RME_MSIX_VEC_INDEX_SH                16
-#define __RME_MSIX_VEC_INDEX(_v)       ((_v) << __RME_MSIX_VEC_INDEX_SH)
-#define __RME_DEPTH                    0x0000ffff
-#define RME_DEPTH_Q1                   0x00038068
-#define RME_QCTRL_Q0                   0x0003802c
-#define __RME_INT_LATENCY_TIMER_MK     0xff000000
-#define __RME_INT_LATENCY_TIMER_SH     24
-#define __RME_INT_LATENCY_TIMER(_v)    ((_v) << __RME_INT_LATENCY_TIMER_SH)
-#define __RME_INT_DELAY_TIMER_MK       0x00ff0000
-#define __RME_INT_DELAY_TIMER_SH       16
-#define __RME_INT_DELAY_TIMER(_v)      ((_v) << __RME_INT_DELAY_TIMER_SH)
-#define __RME_INT_DELAY_DISABLE                0x00008000
-#define __RME_DLY_DELAY_DISABLE                0x00004000
-#define __RME_ACK_PENDING              0x00002000
-#define __RME_FULL_INTERRUPT_DISABLE   0x00001000
-#define __RME_CTRL_UNUSED10_MK         0x00000c00
-#define __RME_CTRL_UNUSED10_SH         10
-#define __RME_CTRL_UNUSED10(_v)                ((_v) << __RME_CTRL_UNUSED10_SH)
-#define __RME_PCIEID_MK                        0x00000300
-#define __RME_PCIEID_SH                        8
-#define __RME_PCIEID(_v)               ((_v) << __RME_PCIEID_SH)
-#define __RME_CTRL_UNUSED00_MK         0x000000fe
-#define __RME_CTRL_UNUSED00_SH         1
-#define __RME_CTRL_UNUSED00(_v)                ((_v) << __RME_CTRL_UNUSED00_SH)
-#define __RME_ESIZE                    0x00000001
-#define RME_QCTRL_Q1                   0x0003806c
-#define __RME_CTRL_UNUSED11_MK         0x00000c00
-#define __RME_CTRL_UNUSED11_SH         10
-#define __RME_CTRL_UNUSED11(_v)                ((_v) << __RME_CTRL_UNUSED11_SH)
-#define __RME_CTRL_UNUSED01_MK         0x000000fe
-#define __RME_CTRL_UNUSED01_SH         1
-#define __RME_CTRL_UNUSED01(_v)                ((_v) << __RME_CTRL_UNUSED01_SH)
-#define PSS_CTL_REG                    0x00018800
-#define __PSS_I2C_CLK_DIV_MK           0x007f0000
-#define __PSS_I2C_CLK_DIV_SH           16
-#define __PSS_I2C_CLK_DIV(_v)          ((_v) << __PSS_I2C_CLK_DIV_SH)
-#define __PSS_LMEM_INIT_DONE           0x00001000
-#define __PSS_LMEM_RESET               0x00000200
-#define __PSS_LMEM_INIT_EN             0x00000100
-#define __PSS_LPU1_RESET               0x00000002
-#define __PSS_LPU0_RESET               0x00000001
-#define PSS_ERR_STATUS_REG             0x00018810
-#define __PSS_LPU1_TCM_READ_ERR                0x00200000
-#define __PSS_LPU0_TCM_READ_ERR                0x00100000
-#define __PSS_LMEM5_CORR_ERR           0x00080000
-#define __PSS_LMEM4_CORR_ERR           0x00040000
-#define __PSS_LMEM3_CORR_ERR           0x00020000
-#define __PSS_LMEM2_CORR_ERR           0x00010000
-#define __PSS_LMEM1_CORR_ERR           0x00008000
-#define __PSS_LMEM0_CORR_ERR           0x00004000
-#define __PSS_LMEM5_UNCORR_ERR         0x00002000
-#define __PSS_LMEM4_UNCORR_ERR         0x00001000
-#define __PSS_LMEM3_UNCORR_ERR         0x00000800
-#define __PSS_LMEM2_UNCORR_ERR         0x00000400
-#define __PSS_LMEM1_UNCORR_ERR         0x00000200
-#define __PSS_LMEM0_UNCORR_ERR         0x00000100
-#define __PSS_BAL_PERR                 0x00000080
-#define __PSS_DIP_IF_ERR               0x00000040
-#define __PSS_IOH_IF_ERR               0x00000020
-#define __PSS_TDS_IF_ERR               0x00000010
-#define __PSS_RDS_IF_ERR               0x00000008
-#define __PSS_SGM_IF_ERR               0x00000004
-#define __PSS_LPU1_RAM_ERR             0x00000002
-#define __PSS_LPU0_RAM_ERR             0x00000001
-#define ERR_SET_REG                    0x00018818
-#define __PSS_ERR_STATUS_SET           0x003fffff
-#define PMM_1T_RESET_REG_P0            0x0002381c
-#define __PMM_1T_RESET_P               0x00000001
-#define PMM_1T_RESET_REG_P1            0x00023c1c
-#define HQM_QSET0_RXQ_DRBL_P0          0x00038000
-#define __RXQ0_ADD_VECTORS_P           0x80000000
-#define __RXQ0_STOP_P                  0x40000000
-#define __RXQ0_PRD_PTR_P               0x0000ffff
-#define HQM_QSET1_RXQ_DRBL_P0          0x00038080
-#define __RXQ1_ADD_VECTORS_P           0x80000000
-#define __RXQ1_STOP_P                  0x40000000
-#define __RXQ1_PRD_PTR_P               0x0000ffff
-#define HQM_QSET0_RXQ_DRBL_P1          0x0003c000
-#define HQM_QSET1_RXQ_DRBL_P1          0x0003c080
-#define HQM_QSET0_TXQ_DRBL_P0          0x00038020
-#define __TXQ0_ADD_VECTORS_P           0x80000000
-#define __TXQ0_STOP_P                  0x40000000
-#define __TXQ0_PRD_PTR_P               0x0000ffff
-#define HQM_QSET1_TXQ_DRBL_P0          0x000380a0
-#define __TXQ1_ADD_VECTORS_P           0x80000000
-#define __TXQ1_STOP_P                  0x40000000
-#define __TXQ1_PRD_PTR_P               0x0000ffff
-#define HQM_QSET0_TXQ_DRBL_P1          0x0003c020
-#define HQM_QSET1_TXQ_DRBL_P1          0x0003c0a0
-#define HQM_QSET0_IB_DRBL_1_P0         0x00038040
-#define __IB1_0_ACK_P                  0x80000000
-#define __IB1_0_DISABLE_P              0x40000000
-#define __IB1_0_COALESCING_CFG_P_MK    0x00ff0000
-#define __IB1_0_COALESCING_CFG_P_SH    16
-#define __IB1_0_COALESCING_CFG_P(_v)   ((_v) << __IB1_0_COALESCING_CFG_P_SH)
-#define __IB1_0_NUM_OF_ACKED_EVENTS_P  0x0000ffff
-#define HQM_QSET1_IB_DRBL_1_P0         0x000380c0
-#define __IB1_1_ACK_P                  0x80000000
-#define __IB1_1_DISABLE_P              0x40000000
-#define __IB1_1_COALESCING_CFG_P_MK    0x00ff0000
-#define __IB1_1_COALESCING_CFG_P_SH    16
-#define __IB1_1_COALESCING_CFG_P(_v)   ((_v) << __IB1_1_COALESCING_CFG_P_SH)
-#define __IB1_1_NUM_OF_ACKED_EVENTS_P  0x0000ffff
-#define HQM_QSET0_IB_DRBL_1_P1         0x0003c040
-#define HQM_QSET1_IB_DRBL_1_P1         0x0003c0c0
-#define HQM_QSET0_IB_DRBL_2_P0         0x00038060
-#define __IB2_0_ACK_P                  0x80000000
-#define __IB2_0_DISABLE_P              0x40000000
-#define __IB2_0_COALESCING_CFG_P_MK    0x00ff0000
-#define __IB2_0_COALESCING_CFG_P_SH    16
-#define __IB2_0_COALESCING_CFG_P(_v)   ((_v) << __IB2_0_COALESCING_CFG_P_SH)
-#define __IB2_0_NUM_OF_ACKED_EVENTS_P  0x0000ffff
-#define HQM_QSET1_IB_DRBL_2_P0         0x000380e0
-#define __IB2_1_ACK_P                  0x80000000
-#define __IB2_1_DISABLE_P              0x40000000
-#define __IB2_1_COALESCING_CFG_P_MK    0x00ff0000
-#define __IB2_1_COALESCING_CFG_P_SH    16
-#define __IB2_1_COALESCING_CFG_P(_v)   ((_v) << __IB2_1_COALESCING_CFG_P_SH)
-#define __IB2_1_NUM_OF_ACKED_EVENTS_P  0x0000ffff
-#define HQM_QSET0_IB_DRBL_2_P1         0x0003c060
-#define HQM_QSET1_IB_DRBL_2_P1         0x0003c0e0
-
-
-/*
- * These definitions are either in error/missing in spec. Its auto-generated
- * from hard coded values in regparse.pl.
- */
-#define __EMPHPOST_AT_4G_MK_FIX                0x0000001c
-#define __EMPHPOST_AT_4G_SH_FIX                0x00000002
-#define __EMPHPRE_AT_4G_FIX            0x00000003
-#define __SFP_TXRATE_EN_FIX            0x00000100
-#define __SFP_RXRATE_EN_FIX            0x00000080
-
-
-/*
- * These register definitions are auto-generated from hard coded values
- * in regparse.pl.
- */
-
-
-/*
- * These register mapping definitions are auto-generated from mapping tables
- * in regparse.pl.
- */
-#define BFA_IOC0_HBEAT_REG             HOST_SEM0_INFO_REG
-#define BFA_IOC0_STATE_REG             HOST_SEM1_INFO_REG
-#define BFA_IOC1_HBEAT_REG             HOST_SEM2_INFO_REG
-#define BFA_IOC1_STATE_REG             HOST_SEM3_INFO_REG
-#define BFA_FW_USE_COUNT                HOST_SEM4_INFO_REG
-#define BFA_IOC_FAIL_SYNC              HOST_SEM5_INFO_REG
-
-#define CPE_DEPTH_Q(__n) \
-       (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
-#define CPE_QCTRL_Q(__n) \
-       (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
-#define CPE_PI_PTR_Q(__n) \
-       (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
-#define CPE_CI_PTR_Q(__n) \
-       (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
-#define RME_DEPTH_Q(__n) \
-       (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
-#define RME_QCTRL_Q(__n) \
-       (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
-#define RME_PI_PTR_Q(__n) \
-       (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
-#define RME_CI_PTR_Q(__n) \
-       (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) \
-       (HQM_QSET0_RXQ_DRBL_P0 + (__n) *        \
-       (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) \
-       (HQM_QSET0_TXQ_DRBL_P0 + (__n) *        \
-       (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) \
-       (HQM_QSET0_IB_DRBL_1_P0 + (__n) *       \
-       (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) \
-       (HQM_QSET0_IB_DRBL_2_P0 + (__n) *       \
-       (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) \
-       (HQM_QSET0_RXQ_DRBL_P1 + (__n) *        \
-       (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) \
-       (HQM_QSET0_TXQ_DRBL_P1 + (__n) *        \
-       (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) \
-       (HQM_QSET0_IB_DRBL_1_P1 + (__n) *       \
-       (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) \
-       (HQM_QSET0_IB_DRBL_2_P1 + (__n) *       \
-       (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
-
-#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define CPE_Q_MASK(__q) ((__q) & 0x3)
-#define RME_Q_MASK(__q) ((__q) & 0x3)
-
-
-/*
- * PCI MSI-X vector defines
- */
-enum {
-       BFA_MSIX_CPE_Q0 = 0,
-       BFA_MSIX_CPE_Q1 = 1,
-       BFA_MSIX_CPE_Q2 = 2,
-       BFA_MSIX_CPE_Q3 = 3,
-       BFA_MSIX_RME_Q0 = 4,
-       BFA_MSIX_RME_Q1 = 5,
-       BFA_MSIX_RME_Q2 = 6,
-       BFA_MSIX_RME_Q3 = 7,
-       BFA_MSIX_LPU_ERR = 8,
-       BFA_MSIX_CT_MAX = 9,
-};
-
-/*
- * And corresponding host interrupt status bit field defines
- */
-#define __HFN_INT_CPE_Q0               0x00000001U
-#define __HFN_INT_CPE_Q1               0x00000002U
-#define __HFN_INT_CPE_Q2               0x00000004U
-#define __HFN_INT_CPE_Q3               0x00000008U
-#define __HFN_INT_CPE_Q4               0x00000010U
-#define __HFN_INT_CPE_Q5               0x00000020U
-#define __HFN_INT_CPE_Q6               0x00000040U
-#define __HFN_INT_CPE_Q7               0x00000080U
-#define __HFN_INT_RME_Q0               0x00000100U
-#define __HFN_INT_RME_Q1               0x00000200U
-#define __HFN_INT_RME_Q2               0x00000400U
-#define __HFN_INT_RME_Q3               0x00000800U
-#define __HFN_INT_RME_Q4               0x00001000U
-#define __HFN_INT_RME_Q5               0x00002000U
-#define __HFN_INT_RME_Q6               0x00004000U
-#define __HFN_INT_RME_Q7               0x00008000U
-#define __HFN_INT_ERR_EMC              0x00010000U
-#define __HFN_INT_ERR_LPU0             0x00020000U
-#define __HFN_INT_ERR_LPU1             0x00040000U
-#define __HFN_INT_ERR_PSS              0x00080000U
-#define __HFN_INT_MBOX_LPU0            0x00100000U
-#define __HFN_INT_MBOX_LPU1            0x00200000U
-#define __HFN_INT_MBOX1_LPU0           0x00400000U
-#define __HFN_INT_MBOX1_LPU1           0x00800000U
-#define __HFN_INT_LL_HALT              0x01000000U
-#define __HFN_INT_CPE_MASK             0x000000ffU
-#define __HFN_INT_RME_MASK             0x0000ff00U
-
-
-/*
- * catapult memory map.
- */
-#define LL_PGN_HQM0            0x0096
-#define LL_PGN_HQM1            0x0097
-#define PSS_SMEM_PAGE_START    0x8000
-#define PSS_SMEM_PGNUM(_pg0, _ma)      ((_pg0) + ((_ma) >> 15))
-#define PSS_SMEM_PGOFF(_ma)    ((_ma) & 0x7fff)
-
-/*
- * End of catapult memory map
- */
-
-
-#endif /* __BFI_CTREG_H__ */
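
The per-queue register macros removed above (CPE_DEPTH_Q(), CPE_QCTRL_Q(), RME_PI_PTR_Q() and the HQM_QSET_* doorbell variants) all use the same stride-addressing idiom: the address of queue __n's register is the queue-0 address plus __n times the distance between the queue-1 and queue-0 addresses. Below is a minimal, self-contained C sketch of that idiom; the EXAMPLE_* names and the 0x100/0x40 offsets are hypothetical placeholders, not the real CPE_DEPTH_Q0/Q1 values from the deleted header.

/* Illustrative only -- hypothetical offsets, not taken from bfi_ctreg.h. */
#define EXAMPLE_DEPTH_Q0	0x100	/* hypothetical queue-0 register offset */
#define EXAMPLE_DEPTH_Q1	0x140	/* hypothetical queue-1 register offset */
#define EXAMPLE_DEPTH_Q(__n) \
	(EXAMPLE_DEPTH_Q0 + (__n) * (EXAMPLE_DEPTH_Q1 - EXAMPLE_DEPTH_Q0))

/* EXAMPLE_DEPTH_Q(3) == 0x100 + 3 * 0x40 == 0x1c0 */

The same pattern survives in the new bfi_reg.h for the CT2 MAC control registers (CT2_CSI_MAC_CONTROL_REG(__n) further down).
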
index 19e888a57555accff45b49a870973146b9df7892..0d9f1fb50db0c0ae74ea3e5e7a529b0594f436a9 100644 (file)
@@ -28,11 +28,17 @@ enum bfi_iocfc_h2i_msgs {
        BFI_IOCFC_H2I_CFG_REQ           = 1,
        BFI_IOCFC_H2I_SET_INTR_REQ      = 2,
        BFI_IOCFC_H2I_UPDATEQ_REQ       = 3,
+       BFI_IOCFC_H2I_FAA_ENABLE_REQ    = 4,
+       BFI_IOCFC_H2I_FAA_DISABLE_REQ   = 5,
+       BFI_IOCFC_H2I_FAA_QUERY_REQ     = 6,
 };
 
 enum bfi_iocfc_i2h_msgs {
        BFI_IOCFC_I2H_CFG_REPLY         = BFA_I2HM(1),
        BFI_IOCFC_I2H_UPDATEQ_RSP       = BFA_I2HM(3),
+       BFI_IOCFC_I2H_FAA_ENABLE_RSP    = BFA_I2HM(4),
+       BFI_IOCFC_I2H_FAA_DISABLE_RSP   = BFA_I2HM(5),
+       BFI_IOCFC_I2H_FAA_QUERY_RSP     = BFA_I2HM(6),
 };
 
 struct bfi_iocfc_cfg_s {
@@ -40,6 +46,12 @@ struct bfi_iocfc_cfg_s {
        u8       sense_buf_len; /*  SCSI sense length       */
        u16     rsvd_1;
        u32     endian_sig;     /*  endian signature of host     */
+       u8      rsvd_2;
+       u8      single_msix_vec;
+       u8      rsvd[2];
+       __be16  num_ioim_reqs;
+       __be16  num_fwtio_reqs;
+
 
        /*
         * Request and response circular queue base addresses, size and
@@ -54,7 +66,8 @@ struct bfi_iocfc_cfg_s {
 
        union bfi_addr_u  stats_addr;   /*  DMA-able address for stats    */
        union bfi_addr_u  cfgrsp_addr;  /*  config response dma address  */
-       union bfi_addr_u  ioim_snsbase;  /*  IO sense buffer base address */
+       union bfi_addr_u  ioim_snsbase[BFI_IOIM_SNSBUF_SEGS];
+                                       /*  IO sense buf base addr segments */
        struct bfa_iocfc_intr_attr_s intr_attr; /*  IOC interrupt attributes */
 };
 
@@ -68,11 +81,25 @@ struct bfi_iocfc_bootwwns {
        u8              rsvd[7];
 };
 
+/**
+ * Queue configuration response from firmware
+ */
+struct bfi_iocfc_qreg_s {
+       u32     cpe_q_ci_off[BFI_IOC_MAX_CQS];
+       u32     cpe_q_pi_off[BFI_IOC_MAX_CQS];
+       u32     cpe_qctl_off[BFI_IOC_MAX_CQS];
+       u32     rme_q_ci_off[BFI_IOC_MAX_CQS];
+       u32     rme_q_pi_off[BFI_IOC_MAX_CQS];
+       u32     rme_qctl_off[BFI_IOC_MAX_CQS];
+       u8      hw_qid[BFI_IOC_MAX_CQS];
+};
+
 struct bfi_iocfc_cfgrsp_s {
        struct bfa_iocfc_fwcfg_s        fwcfg;
        struct bfa_iocfc_intr_attr_s    intr_attr;
        struct bfi_iocfc_bootwwns       bootwwns;
        struct bfi_pbc_s                pbc_cfg;
+       struct bfi_iocfc_qreg_s         qreg;
 };
 
 /*
@@ -150,6 +177,37 @@ union bfi_iocfc_i2h_msg_u {
        u32 mboxmsg[BFI_IOC_MSGSZ];
 };
 
+/*
+ * BFI_IOCFC_H2I_FAA_ENABLE_REQ / BFI_IOCFC_H2I_FAA_DISABLE_REQ message
+ */
+struct bfi_faa_en_dis_s {
+       struct bfi_mhdr_s mh;   /* common msg header    */
+};
+
+/*
+ * BFI_IOCFC_H2I_FAA_QUERY_REQ message
+ */
+struct bfi_faa_query_s {
+       struct bfi_mhdr_s mh;   /* common msg header    */
+       u8      faa_status;     /* FAA status           */
+       u8      addr_source;    /* PWWN source          */
+       u8      rsvd[2];
+       wwn_t   faa;            /* Fabric acquired PWWN */
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_ENABLE_RSP, BFI_IOCFC_I2H_FAA_DISABLE_RSP message
+ */
+struct bfi_faa_en_dis_rsp_s {
+       struct bfi_mhdr_s mh;   /* common msg header    */
+       u8      status;         /* FAA enable/disable status    */
+       u8      rsvd[3];
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_QUERY_RSP message
+ */
+#define bfi_faa_query_rsp_t struct bfi_faa_query_s
 
 enum bfi_fcport_h2i {
        BFI_FCPORT_H2I_ENABLE_REQ               = (1),
@@ -213,7 +271,8 @@ struct bfi_fcport_enable_req_s {
 struct bfi_fcport_set_svc_params_req_s {
        struct bfi_mhdr_s  mh;          /*  msg header */
        __be16     tx_bbcredit; /*  Tx credits */
-       u16        rsvd;
+       u8      bb_scn;         /* BB_SC FC credit recovery */
+       u8      rsvd;
 };
 
 /*
@@ -293,12 +352,12 @@ struct bfi_fcxp_send_req_s {
        u8       class;         /*  FC class used for req/rsp       */
        u8       rsp_timeout;   /*  timeout in secs, 0-no response */
        u8       cts;           /*  continue sequence               */
-       u8       lp_tag;        /*  lport tag                       */
+       u8       lp_fwtag;      /*  lport tag                       */
        struct fchs_s   fchs;   /*  request FC header structure    */
        __be32  req_len;        /*  request payload length          */
        __be32  rsp_maxlen;     /*  max response length expected   */
-       struct bfi_sge_s   req_sge[BFA_FCXP_MAX_SGES];  /*  request buf    */
-       struct bfi_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];  /*  response buf   */
+       struct bfi_alen_s req_alen;     /* request buffer       */
+       struct bfi_alen_s rsp_alen;     /* response buffer      */
 };
 
 /*
@@ -328,7 +387,7 @@ struct bfi_uf_buf_post_s {
        struct bfi_mhdr_s  mh;          /*  Common msg header           */
        u16     buf_tag;        /*  buffer tag                  */
        __be16  buf_len;        /*  total buffer length */
-       struct bfi_sge_s   sge[BFA_UF_MAX_SGES]; /*  buffer DMA SGEs    */
+       struct bfi_alen_s alen; /* buffer address/len pair      */
 };
 
 struct bfi_uf_frm_rcvd_s {
@@ -346,26 +405,27 @@ enum bfi_lps_h2i_msgs {
 };
 
 enum bfi_lps_i2h_msgs {
-       BFI_LPS_H2I_LOGIN_RSP   = BFA_I2HM(1),
-       BFI_LPS_H2I_LOGOUT_RSP  = BFA_I2HM(2),
-       BFI_LPS_H2I_CVL_EVENT   = BFA_I2HM(3),
+       BFI_LPS_I2H_LOGIN_RSP   = BFA_I2HM(1),
+       BFI_LPS_I2H_LOGOUT_RSP  = BFA_I2HM(2),
+       BFI_LPS_I2H_CVL_EVENT   = BFA_I2HM(3),
 };
 
 struct bfi_lps_login_req_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              bfa_tag;
        u8              alpa;
        __be16          pdu_size;
        wwn_t           pwwn;
        wwn_t           nwwn;
        u8              fdisc;
        u8              auth_en;
-       u8              rsvd[2];
+       u8              lps_role;
+       u8              bb_scn;
 };
 
 struct bfi_lps_login_rsp_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              fw_tag;
        u8              status;
        u8              lsrjt_rsn;
        u8              lsrjt_expl;
@@ -380,31 +440,33 @@ struct bfi_lps_login_rsp_s {
        mac_t           fcf_mac;
        u8              ext_status;
        u8              brcd_switch;    /*  attached peer is brcd switch */
+       u8              bb_scn;         /* attached port's bb_scn */
+       u8              bfa_tag;
 };
 
 struct bfi_lps_logout_req_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              fw_tag;
        u8              rsvd[3];
        wwn_t           port_name;
 };
 
 struct bfi_lps_logout_rsp_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              bfa_tag;
        u8              status;
        u8              rsvd[2];
 };
 
 struct bfi_lps_cvl_event_s {
        struct bfi_mhdr_s  mh;          /*  common msg header           */
-       u8              lp_tag;
+       u8              bfa_tag;
        u8              rsvd[3];
 };
 
 struct bfi_lps_n2n_pid_req_s {
        struct bfi_mhdr_s       mh;     /*  common msg header           */
-       u8      lp_tag;
+       u8      fw_tag;
        u32     lp_pid:24;
 };
 
@@ -439,7 +501,7 @@ struct bfi_rport_create_req_s {
        u16     bfa_handle;     /*  host rport handle           */
        __be16  max_frmsz;      /*  max rcv pdu size            */
        u32     pid:24, /*  remote port ID              */
-               lp_tag:8;       /*  local port tag              */
+               lp_fwtag:8;     /*  local port tag              */
        u32     local_pid:24,   /*  local port ID               */
                cisc:8;
        u8      fc_class;       /*  supported FC classes        */
@@ -502,62 +564,63 @@ union bfi_rport_i2h_msg_u {
  * Initiator mode I-T nexus interface defines.
  */
 
-enum bfi_itnim_h2i {
-       BFI_ITNIM_H2I_CREATE_REQ = 1,   /*  i-t nexus creation */
-       BFI_ITNIM_H2I_DELETE_REQ = 2,   /*  i-t nexus deletion */
+enum bfi_itn_h2i {
+       BFI_ITN_H2I_CREATE_REQ = 1,     /*  i-t nexus creation */
+       BFI_ITN_H2I_DELETE_REQ = 2,     /*  i-t nexus deletion */
 };
 
-enum bfi_itnim_i2h {
-       BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1),
-       BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2),
-       BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3),
+enum bfi_itn_i2h {
+       BFI_ITN_I2H_CREATE_RSP = BFA_I2HM(1),
+       BFI_ITN_I2H_DELETE_RSP = BFA_I2HM(2),
+       BFI_ITN_I2H_SLER_EVENT = BFA_I2HM(3),
 };
 
-struct bfi_itnim_create_req_s {
+struct bfi_itn_create_req_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     fw_handle;      /*  f/w handle for itnim         */
        u8      class;          /*  FC class for IO              */
        u8      seq_rec;        /*  sequence recovery support    */
        u8      msg_no;         /*  seq id of the msg            */
+       u8      role;
 };
 
-struct bfi_itnim_create_rsp_s {
+struct bfi_itn_create_rsp_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     bfa_handle;     /*  bfa handle for itnim         */
        u8      status;         /*  fcp request status           */
        u8      seq_id;         /*  seq id of the msg            */
 };
 
-struct bfi_itnim_delete_req_s {
+struct bfi_itn_delete_req_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     fw_handle;      /*  f/w itnim handle             */
        u8      seq_id;         /*  seq id of the msg            */
        u8      rsvd;
 };
 
-struct bfi_itnim_delete_rsp_s {
+struct bfi_itn_delete_rsp_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     bfa_handle;     /*  bfa handle for itnim         */
        u8      status;         /*  fcp request status           */
        u8      seq_id;         /*  seq id of the msg            */
 };
 
-struct bfi_itnim_sler_event_s {
+struct bfi_itn_sler_event_s {
        struct bfi_mhdr_s  mh;          /*  common msg header            */
        u16     bfa_handle;     /*  bfa handle for itnim         */
        u16     rsvd;
 };
 
-union bfi_itnim_h2i_msg_u {
-       struct bfi_itnim_create_req_s *create_req;
-       struct bfi_itnim_delete_req_s *delete_req;
+union bfi_itn_h2i_msg_u {
+       struct bfi_itn_create_req_s *create_req;
+       struct bfi_itn_delete_req_s *delete_req;
        struct bfi_msg_s        *msg;
 };
 
-union bfi_itnim_i2h_msg_u {
-       struct bfi_itnim_create_rsp_s *create_rsp;
-       struct bfi_itnim_delete_rsp_s *delete_rsp;
-       struct bfi_itnim_sler_event_s *sler_event;
+union bfi_itn_i2h_msg_u {
+       struct bfi_itn_create_rsp_s *create_rsp;
+       struct bfi_itn_delete_rsp_s *delete_rsp;
+       struct bfi_itn_sler_event_s *sler_event;
        struct bfi_msg_s        *msg;
 };
 
@@ -693,7 +756,6 @@ enum bfi_ioim_status {
        BFI_IOIM_STS_PATHTOV = 8,
 };
 
-#define BFI_IOIM_SNSLEN        (256)
 /*
  * I/O response message
  */
@@ -772,4 +834,27 @@ struct bfi_tskim_rsp_s {
 
 #pragma pack()
 
+/*
+ * Crossbow PCI MSI-X vector defines
+ */
+enum {
+       BFI_MSIX_CPE_QMIN_CB = 0,
+       BFI_MSIX_CPE_QMAX_CB = 7,
+       BFI_MSIX_RME_QMIN_CB = 8,
+       BFI_MSIX_RME_QMAX_CB = 15,
+       BFI_MSIX_CB_MAX = 22,
+};
+
+/*
+ * Catapult FC PCI MSI-X vector defines
+ */
+enum {
+       BFI_MSIX_LPU_ERR_CT = 0,
+       BFI_MSIX_CPE_QMIN_CT = 1,
+       BFI_MSIX_CPE_QMAX_CT = 4,
+       BFI_MSIX_RME_QMIN_CT = 5,
+       BFI_MSIX_RME_QMAX_CT = 8,
+       BFI_MSIX_CT_MAX = 9,
+};
+
 #endif /* __BFI_MS_H__ */
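
The per-ASIC MSI-X enums added at the end of bfi_ms.h above describe a simple linear queue-to-vector layout: Crossbow (CB) places CPE queues on vectors 0-7 and RME queues on vectors 8-15, while Catapult (CT) keeps vector 0 for LPU errors and packs CPE on 1-4 and RME on 5-8. The helpers below are illustrative only (the example_* names are made up and are not part of the patch); they simply show how a queue index maps onto those enum ranges.

/* Illustrative only -- not part of the driver sources. */
static inline int example_cpe_msix_vec(int q, int is_ct_asic)
{
	/* CPE (request) queue q -> MSI-X vector, per the enums above */
	return (is_ct_asic ? BFI_MSIX_CPE_QMIN_CT : BFI_MSIX_CPE_QMIN_CB) + q;
}

static inline int example_rme_msix_vec(int q, int is_ct_asic)
{
	/* RME (response) queue q -> MSI-X vector, per the enums above */
	return (is_ct_asic ? BFI_MSIX_RME_QMIN_CT : BFI_MSIX_RME_QMIN_CB) + q;
}

/* e.g. example_rme_msix_vec(2, 1) == 5 + 2 == 7 on a Catapult ASIC */
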
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
new file mode 100644 (file)
index 0000000..d892064
--- /dev/null
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
+ */
+
+#ifndef __BFI_REG_H__
+#define __BFI_REG_H__
+
+#define HOSTFN0_INT_STATUS             0x00014000      /* cb/ct        */
+#define HOSTFN1_INT_STATUS             0x00014100      /* cb/ct        */
+#define HOSTFN2_INT_STATUS             0x00014300      /* ct           */
+#define HOSTFN3_INT_STATUS             0x00014400      /* ct           */
+#define HOSTFN0_INT_MSK                        0x00014004      /* cb/ct        */
+#define HOSTFN1_INT_MSK                        0x00014104      /* cb/ct        */
+#define HOSTFN2_INT_MSK                        0x00014304      /* ct           */
+#define HOSTFN3_INT_MSK                        0x00014404      /* ct           */
+
+#define HOST_PAGE_NUM_FN0              0x00014008      /* cb/ct        */
+#define HOST_PAGE_NUM_FN1              0x00014108      /* cb/ct        */
+#define HOST_PAGE_NUM_FN2              0x00014308      /* ct           */
+#define HOST_PAGE_NUM_FN3              0x00014408      /* ct           */
+
+#define APP_PLL_LCLK_CTL_REG           0x00014204      /* cb/ct        */
+#define __P_LCLK_PLL_LOCK              0x80000000
+#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_LCLK_RESET_TIMER_MK  0x000e0000
+#define __APP_PLL_LCLK_RESET_TIMER_SH  17
+#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
+#define __APP_PLL_LCLK_LOGIC_SOFT_RESET        0x00010000
+#define __APP_PLL_LCLK_CNTLMT0_1_MK    0x0000c000
+#define __APP_PLL_LCLK_CNTLMT0_1_SH    14
+#define __APP_PLL_LCLK_CNTLMT0_1(_v)   ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
+#define __APP_PLL_LCLK_JITLMT0_1_MK    0x00003000
+#define __APP_PLL_LCLK_JITLMT0_1_SH    12
+#define __APP_PLL_LCLK_JITLMT0_1(_v)   ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
+#define __APP_PLL_LCLK_HREF            0x00000800
+#define __APP_PLL_LCLK_HDIV            0x00000400
+#define __APP_PLL_LCLK_P0_1_MK         0x00000300
+#define __APP_PLL_LCLK_P0_1_SH         8
+#define __APP_PLL_LCLK_P0_1(_v)                ((_v) << __APP_PLL_LCLK_P0_1_SH)
+#define __APP_PLL_LCLK_Z0_2_MK         0x000000e0
+#define __APP_PLL_LCLK_Z0_2_SH         5
+#define __APP_PLL_LCLK_Z0_2(_v)                ((_v) << __APP_PLL_LCLK_Z0_2_SH)
+#define __APP_PLL_LCLK_RSEL200500      0x00000010
+#define __APP_PLL_LCLK_ENARST          0x00000008
+#define __APP_PLL_LCLK_BYPASS          0x00000004
+#define __APP_PLL_LCLK_LRESETN         0x00000002
+#define __APP_PLL_LCLK_ENABLE          0x00000001
+#define APP_PLL_SCLK_CTL_REG           0x00014208      /* cb/ct        */
+#define __P_SCLK_PLL_LOCK              0x80000000
+#define __APP_PLL_SCLK_RESET_TIMER_MK  0x000e0000
+#define __APP_PLL_SCLK_RESET_TIMER_SH  17
+#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
+#define __APP_PLL_SCLK_LOGIC_SOFT_RESET        0x00010000
+#define __APP_PLL_SCLK_CNTLMT0_1_MK    0x0000c000
+#define __APP_PLL_SCLK_CNTLMT0_1_SH    14
+#define __APP_PLL_SCLK_CNTLMT0_1(_v)   ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
+#define __APP_PLL_SCLK_JITLMT0_1_MK    0x00003000
+#define __APP_PLL_SCLK_JITLMT0_1_SH    12
+#define __APP_PLL_SCLK_JITLMT0_1(_v)   ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
+#define __APP_PLL_SCLK_HREF            0x00000800
+#define __APP_PLL_SCLK_HDIV            0x00000400
+#define __APP_PLL_SCLK_P0_1_MK         0x00000300
+#define __APP_PLL_SCLK_P0_1_SH         8
+#define __APP_PLL_SCLK_P0_1(_v)                ((_v) << __APP_PLL_SCLK_P0_1_SH)
+#define __APP_PLL_SCLK_Z0_2_MK         0x000000e0
+#define __APP_PLL_SCLK_Z0_2_SH         5
+#define __APP_PLL_SCLK_Z0_2(_v)                ((_v) << __APP_PLL_SCLK_Z0_2_SH)
+#define __APP_PLL_SCLK_RSEL200500      0x00000010
+#define __APP_PLL_SCLK_ENARST          0x00000008
+#define __APP_PLL_SCLK_BYPASS          0x00000004
+#define __APP_PLL_SCLK_LRESETN         0x00000002
+#define __APP_PLL_SCLK_ENABLE          0x00000001
+#define __ENABLE_MAC_AHB_1             0x00800000      /* ct           */
+#define __ENABLE_MAC_AHB_0             0x00400000      /* ct           */
+#define __ENABLE_MAC_1                 0x00200000      /* ct           */
+#define __ENABLE_MAC_0                 0x00100000      /* ct           */
+
+#define HOST_SEM0_REG                  0x00014230      /* cb/ct        */
+#define HOST_SEM1_REG                  0x00014234      /* cb/ct        */
+#define HOST_SEM2_REG                  0x00014238      /* cb/ct        */
+#define HOST_SEM3_REG                  0x0001423c      /* cb/ct        */
+#define HOST_SEM4_REG                  0x00014610      /* cb/ct        */
+#define HOST_SEM5_REG                  0x00014614      /* cb/ct        */
+#define HOST_SEM6_REG                  0x00014618      /* cb/ct        */
+#define HOST_SEM7_REG                  0x0001461c      /* cb/ct        */
+#define HOST_SEM0_INFO_REG             0x00014240      /* cb/ct        */
+#define HOST_SEM1_INFO_REG             0x00014244      /* cb/ct        */
+#define HOST_SEM2_INFO_REG             0x00014248      /* cb/ct        */
+#define HOST_SEM3_INFO_REG             0x0001424c      /* cb/ct        */
+#define HOST_SEM4_INFO_REG             0x00014620      /* cb/ct        */
+#define HOST_SEM5_INFO_REG             0x00014624      /* cb/ct        */
+#define HOST_SEM6_INFO_REG             0x00014628      /* cb/ct        */
+#define HOST_SEM7_INFO_REG             0x0001462c      /* cb/ct        */
+
+#define HOSTFN0_LPU0_CMD_STAT          0x00019000      /* cb/ct        */
+#define HOSTFN0_LPU1_CMD_STAT          0x00019004      /* cb/ct        */
+#define HOSTFN1_LPU0_CMD_STAT          0x00019010      /* cb/ct        */
+#define HOSTFN1_LPU1_CMD_STAT          0x00019014      /* cb/ct        */
+#define HOSTFN2_LPU0_CMD_STAT          0x00019150      /* ct           */
+#define HOSTFN2_LPU1_CMD_STAT          0x00019154      /* ct           */
+#define HOSTFN3_LPU0_CMD_STAT          0x00019160      /* ct           */
+#define HOSTFN3_LPU1_CMD_STAT          0x00019164      /* ct           */
+#define LPU0_HOSTFN0_CMD_STAT          0x00019008      /* cb/ct        */
+#define LPU1_HOSTFN0_CMD_STAT          0x0001900c      /* cb/ct        */
+#define LPU0_HOSTFN1_CMD_STAT          0x00019018      /* cb/ct        */
+#define LPU1_HOSTFN1_CMD_STAT          0x0001901c      /* cb/ct        */
+#define LPU0_HOSTFN2_CMD_STAT          0x00019158      /* ct           */
+#define LPU1_HOSTFN2_CMD_STAT          0x0001915c      /* ct           */
+#define LPU0_HOSTFN3_CMD_STAT          0x00019168      /* ct           */
+#define LPU1_HOSTFN3_CMD_STAT          0x0001916c      /* ct           */
+
+#define PSS_CTL_REG                    0x00018800      /* cb/ct        */
+#define __PSS_I2C_CLK_DIV_MK           0x007f0000
+#define __PSS_I2C_CLK_DIV_SH           16
+#define __PSS_I2C_CLK_DIV(_v)          ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE           0x00001000
+#define __PSS_LMEM_RESET               0x00000200
+#define __PSS_LMEM_INIT_EN             0x00000100
+#define __PSS_LPU1_RESET               0x00000002
+#define __PSS_LPU0_RESET               0x00000001
+#define PSS_ERR_STATUS_REG             0x00018810      /* cb/ct        */
+#define ERR_SET_REG                    0x00018818      /* cb/ct        */
+#define PSS_GPIO_OUT_REG               0x000188c0      /* cb/ct        */
+#define __PSS_GPIO_OUT_REG             0x00000fff
+#define PSS_GPIO_OE_REG                        0x000188c8      /* cb/ct        */
+#define __PSS_GPIO_OE_REG              0x000000ff
+
+#define HOSTFN0_LPU_MBOX0_0            0x00019200      /* cb/ct        */
+#define HOSTFN1_LPU_MBOX0_8            0x00019260      /* cb/ct        */
+#define LPU_HOSTFN0_MBOX0_0            0x00019280      /* cb/ct        */
+#define LPU_HOSTFN1_MBOX0_8            0x000192e0      /* cb/ct        */
+#define HOSTFN2_LPU_MBOX0_0            0x00019400      /* ct           */
+#define HOSTFN3_LPU_MBOX0_8            0x00019460      /* ct           */
+#define LPU_HOSTFN2_MBOX0_0            0x00019480      /* ct           */
+#define LPU_HOSTFN3_MBOX0_8            0x000194e0      /* ct           */
+
+#define HOST_MSIX_ERR_INDEX_FN0                0x0001400c      /* ct           */
+#define HOST_MSIX_ERR_INDEX_FN1                0x0001410c      /* ct           */
+#define HOST_MSIX_ERR_INDEX_FN2                0x0001430c      /* ct           */
+#define HOST_MSIX_ERR_INDEX_FN3                0x0001440c      /* ct           */
+
+#define MBIST_CTL_REG                  0x00014220      /* ct           */
+#define __EDRAM_BISTR_START            0x00000004
+#define MBIST_STAT_REG                 0x00014224      /* ct           */
+#define ETH_MAC_SER_REG                        0x00014288      /* ct           */
+#define __APP_EMS_CKBUFAMPIN           0x00000020
+#define __APP_EMS_REFCLKSEL            0x00000010
+#define __APP_EMS_CMLCKSEL             0x00000008
+#define __APP_EMS_REFCKBUFEN2          0x00000004
+#define __APP_EMS_REFCKBUFEN1          0x00000002
+#define __APP_EMS_CHANNEL_SEL          0x00000001
+#define FNC_PERS_REG                   0x00014604      /* ct           */
+#define __F3_FUNCTION_ACTIVE           0x80000000
+#define __F3_FUNCTION_MODE             0x40000000
+#define __F3_PORT_MAP_MK               0x30000000
+#define __F3_PORT_MAP_SH               28
+#define __F3_PORT_MAP(_v)              ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE                   0x08000000
+#define __F3_INTX_STATUS_MK            0x07000000
+#define __F3_INTX_STATUS_SH            24
+#define __F3_INTX_STATUS(_v)           ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE           0x00800000
+#define __F2_FUNCTION_MODE             0x00400000
+#define __F2_PORT_MAP_MK               0x00300000
+#define __F2_PORT_MAP_SH               20
+#define __F2_PORT_MAP(_v)              ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE                   0x00080000
+#define __F2_INTX_STATUS_MK            0x00070000
+#define __F2_INTX_STATUS_SH            16
+#define __F2_INTX_STATUS(_v)           ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE           0x00008000
+#define __F1_FUNCTION_MODE             0x00004000
+#define __F1_PORT_MAP_MK               0x00003000
+#define __F1_PORT_MAP_SH               12
+#define __F1_PORT_MAP(_v)              ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE                   0x00000800
+#define __F1_INTX_STATUS_MK            0x00000700
+#define __F1_INTX_STATUS_SH            8
+#define __F1_INTX_STATUS(_v)           ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE           0x00000080
+#define __F0_FUNCTION_MODE             0x00000040
+#define __F0_PORT_MAP_MK               0x00000030
+#define __F0_PORT_MAP_SH               4
+#define __F0_PORT_MAP(_v)              ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE                   0x00000008
+#define __F0_INTX_STATUS               0x00000007
+enum {
+       __F0_INTX_STATUS_MSIX = 0x0,
+       __F0_INTX_STATUS_INTA = 0x1,
+       __F0_INTX_STATUS_INTB = 0x2,
+       __F0_INTX_STATUS_INTC = 0x3,
+       __F0_INTX_STATUS_INTD = 0x4,
+};
+
+#define OP_MODE                                0x0001460c      /* ct           */
+#define __APP_ETH_CLK_LOWSPEED         0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED     0x00000002
+#define __GLOBAL_FCOE_MODE             0x00000001
+#define FW_INIT_HALT_P0                        0x000191ac      /* ct           */
+#define __FW_INIT_HALT_P               0x00000001
+#define FW_INIT_HALT_P1                        0x000191bc      /* ct           */
+#define PMM_1T_RESET_REG_P0            0x0002381c      /* ct           */
+#define __PMM_1T_RESET_P               0x00000001
+#define PMM_1T_RESET_REG_P1            0x00023c1c      /* ct           */
+
+/**
+ * Catapult-2 specific defines
+ */
+#define CT2_PCI_CPQ_BASE               0x00030000
+#define CT2_PCI_APP_BASE               0x00030100
+#define CT2_PCI_ETH_BASE               0x00030400
+
+/*
+ * APP block registers
+ */
+#define CT2_HOSTFN_INT_STATUS          (CT2_PCI_APP_BASE + 0x00)
+#define CT2_HOSTFN_INTR_MASK           (CT2_PCI_APP_BASE + 0x04)
+#define CT2_HOSTFN_PERSONALITY0                (CT2_PCI_APP_BASE + 0x08)
+#define __PME_STATUS_                  0x00200000
+#define __PF_VF_BAR_SIZE_MODE__MK      0x00180000
+#define __PF_VF_BAR_SIZE_MODE__SH      19
+#define __PF_VF_BAR_SIZE_MODE_(_v)     ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
+#define __FC_LL_PORT_MAP__MK           0x00060000
+#define __FC_LL_PORT_MAP__SH           17
+#define __FC_LL_PORT_MAP_(_v)          ((_v) << __FC_LL_PORT_MAP__SH)
+#define __PF_VF_ACTIVE_                        0x00010000
+#define __PF_VF_CFG_RDY_               0x00008000
+#define __PF_VF_ENABLE_                        0x00004000
+#define __PF_DRIVER_ACTIVE_            0x00002000
+#define __PF_PME_SEND_ENABLE_          0x00001000
+#define __PF_EXROM_OFFSET__MK          0x00000ff0
+#define __PF_EXROM_OFFSET__SH          4
+#define __PF_EXROM_OFFSET_(_v)         ((_v) << __PF_EXROM_OFFSET__SH)
+#define __FC_LL_MODE_                  0x00000008
+#define __PF_INTX_PIN_                 0x00000007
+#define CT2_HOSTFN_PERSONALITY1                (CT2_PCI_APP_BASE + 0x0C)
+#define __PF_NUM_QUEUES1__MK           0xff000000
+#define __PF_NUM_QUEUES1__SH           24
+#define __PF_NUM_QUEUES1_(_v)          ((_v) << __PF_NUM_QUEUES1__SH)
+#define __PF_VF_QUE_OFFSET1__MK                0x00ff0000
+#define __PF_VF_QUE_OFFSET1__SH                16
+#define __PF_VF_QUE_OFFSET1_(_v)       ((_v) << __PF_VF_QUE_OFFSET1__SH)
+#define __PF_VF_NUM_QUEUES__MK         0x0000ff00
+#define __PF_VF_NUM_QUEUES__SH         8
+#define __PF_VF_NUM_QUEUES_(_v)                ((_v) << __PF_VF_NUM_QUEUES__SH)
+#define __PF_VF_QUE_OFFSET_            0x000000ff
+#define CT2_HOSTFN_PAGE_NUM            (CT2_PCI_APP_BASE + 0x18)
+#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR      (CT2_PCI_APP_BASE + 0x38)
+
+/*
+ * Catapult-2 CPQ block registers
+ */
+#define CT2_HOSTFN_LPU0_MBOX0          (CT2_PCI_CPQ_BASE + 0x00)
+#define CT2_HOSTFN_LPU1_MBOX0          (CT2_PCI_CPQ_BASE + 0x20)
+#define CT2_LPU0_HOSTFN_MBOX0          (CT2_PCI_CPQ_BASE + 0x40)
+#define CT2_LPU1_HOSTFN_MBOX0          (CT2_PCI_CPQ_BASE + 0x60)
+#define CT2_HOSTFN_LPU0_CMD_STAT       (CT2_PCI_CPQ_BASE + 0x80)
+#define CT2_HOSTFN_LPU1_CMD_STAT       (CT2_PCI_CPQ_BASE + 0x84)
+#define CT2_LPU0_HOSTFN_CMD_STAT       (CT2_PCI_CPQ_BASE + 0x88)
+#define CT2_LPU1_HOSTFN_CMD_STAT       (CT2_PCI_CPQ_BASE + 0x8c)
+#define CT2_HOSTFN_LPU0_READ_STAT      (CT2_PCI_CPQ_BASE + 0x90)
+#define CT2_HOSTFN_LPU1_READ_STAT      (CT2_PCI_CPQ_BASE + 0x94)
+#define CT2_LPU0_HOSTFN_MBOX0_MSK      (CT2_PCI_CPQ_BASE + 0x98)
+#define CT2_LPU1_HOSTFN_MBOX0_MSK      (CT2_PCI_CPQ_BASE + 0x9C)
+#define CT2_HOST_SEM0_REG              0x000148f0
+#define CT2_HOST_SEM1_REG              0x000148f4
+#define CT2_HOST_SEM2_REG              0x000148f8
+#define CT2_HOST_SEM3_REG              0x000148fc
+#define CT2_HOST_SEM4_REG              0x00014900
+#define CT2_HOST_SEM5_REG              0x00014904
+#define CT2_HOST_SEM6_REG              0x00014908
+#define CT2_HOST_SEM7_REG              0x0001490c
+#define CT2_HOST_SEM0_INFO_REG         0x000148b0
+#define CT2_HOST_SEM1_INFO_REG         0x000148b4
+#define CT2_HOST_SEM2_INFO_REG         0x000148b8
+#define CT2_HOST_SEM3_INFO_REG         0x000148bc
+#define CT2_HOST_SEM4_INFO_REG         0x000148c0
+#define CT2_HOST_SEM5_INFO_REG         0x000148c4
+#define CT2_HOST_SEM6_INFO_REG         0x000148c8
+#define CT2_HOST_SEM7_INFO_REG         0x000148cc
+
+#define CT2_APP_PLL_LCLK_CTL_REG       0x00014808
+#define __APP_LPUCLK_HALFSPEED         0x40000000
+#define __APP_PLL_LCLK_LOAD            0x20000000
+#define __APP_PLL_LCLK_FBCNT_MK                0x1fe00000
+#define __APP_PLL_LCLK_FBCNT_SH                21
+#define __APP_PLL_LCLK_FBCNT(_v)       ((_v) << __APP_PLL_LCLK_FBCNT_SH)
+enum {
+       __APP_PLL_LCLK_FBCNT_425_MHZ = 6,
+       __APP_PLL_LCLK_FBCNT_468_MHZ = 4,
+};
+#define __APP_PLL_LCLK_EXTFB           0x00000800
+#define __APP_PLL_LCLK_ENOUTS          0x00000400
+#define __APP_PLL_LCLK_RATE            0x00000010
+#define CT2_APP_PLL_SCLK_CTL_REG       0x0001480c
+#define __P_SCLK_PLL_LOCK              0x80000000
+#define __APP_PLL_SCLK_REFCLK_SEL      0x40000000
+#define __APP_PLL_SCLK_CLK_DIV2                0x20000000
+#define __APP_PLL_SCLK_LOAD            0x10000000
+#define __APP_PLL_SCLK_FBCNT_MK                0x0ff00000
+#define __APP_PLL_SCLK_FBCNT_SH                20
+#define __APP_PLL_SCLK_FBCNT(_v)       ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+       __APP_PLL_SCLK_FBCNT_NORM = 6,
+       __APP_PLL_SCLK_FBCNT_10G_FC = 10,
+};
+#define __APP_PLL_SCLK_EXTFB           0x00000800
+#define __APP_PLL_SCLK_ENOUTS          0x00000400
+#define __APP_PLL_SCLK_RATE            0x00000010
+#define CT2_PCIE_MISC_REG              0x00014804
+#define __ETH_CLK_ENABLE_PORT1         0x00000010
+#define CT2_CHIP_MISC_PRG              0x000148a4
+#define __ETH_CLK_ENABLE_PORT0         0x00004000
+#define __APP_LPU_SPEED                        0x00000002
+#define CT2_MBIST_STAT_REG             0x00014818
+#define CT2_MBIST_CTL_REG              0x0001481c
+#define CT2_PMM_1T_CONTROL_REG_P0      0x0002381c
+#define __PMM_1T_PNDB_P                        0x00000002
+#define CT2_PMM_1T_CONTROL_REG_P1      0x00023c1c
+#define CT2_WGN_STATUS                 0x00014990
+#define __WGN_READY                    0x00000400
+#define __GLBL_PF_VF_CFG_RDY           0x00000200
+#define CT2_NFC_CSR_SET_REG            0x00027424
+#define __HALT_NFC_CONTROLLER          0x00000002
+#define __NFC_CONTROLLER_HALTED                0x00001000
+
+#define CT2_CSI_MAC0_CONTROL_REG       0x000270d0
+#define __CSI_MAC_RESET                        0x00000010
+#define __CSI_MAC_AHB_RESET            0x00000008
+#define CT2_CSI_MAC1_CONTROL_REG       0x000270d4
+#define CT2_CSI_MAC_CONTROL_REG(__n)   \
+       (CT2_CSI_MAC0_CONTROL_REG +     \
+       (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
+
+/*
+ * Name semaphore registers based on usage
+ */
+#define BFA_IOC0_HBEAT_REG             HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG             HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG             HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG             HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT               HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC              HOST_SEM5_INFO_REG
+
+/*
+ * CT2 semaphore register locations changed
+ */
+#define CT2_BFA_IOC0_HBEAT_REG         CT2_HOST_SEM0_INFO_REG
+#define CT2_BFA_IOC0_STATE_REG         CT2_HOST_SEM1_INFO_REG
+#define CT2_BFA_IOC1_HBEAT_REG         CT2_HOST_SEM2_INFO_REG
+#define CT2_BFA_IOC1_STATE_REG         CT2_HOST_SEM3_INFO_REG
+#define CT2_BFA_FW_USE_COUNT           CT2_HOST_SEM4_INFO_REG
+#define CT2_BFA_IOC_FAIL_SYNC          CT2_HOST_SEM5_INFO_REG
+
+#define CPE_Q_NUM(__fn, __q)   (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q)   (((__fn) << 2) + (__q))
+
+/*
+ * And corresponding host interrupt status bit field defines
+ */
+#define __HFN_INT_CPE_Q0       0x00000001U
+#define __HFN_INT_CPE_Q1       0x00000002U
+#define __HFN_INT_CPE_Q2       0x00000004U
+#define __HFN_INT_CPE_Q3       0x00000008U
+#define __HFN_INT_CPE_Q4       0x00000010U
+#define __HFN_INT_CPE_Q5       0x00000020U
+#define __HFN_INT_CPE_Q6       0x00000040U
+#define __HFN_INT_CPE_Q7       0x00000080U
+#define __HFN_INT_RME_Q0       0x00000100U
+#define __HFN_INT_RME_Q1       0x00000200U
+#define __HFN_INT_RME_Q2       0x00000400U
+#define __HFN_INT_RME_Q3       0x00000800U
+#define __HFN_INT_RME_Q4       0x00001000U
+#define __HFN_INT_RME_Q5       0x00002000U
+#define __HFN_INT_RME_Q6       0x00004000U
+#define __HFN_INT_RME_Q7       0x00008000U
+#define __HFN_INT_ERR_EMC      0x00010000U
+#define __HFN_INT_ERR_LPU0     0x00020000U
+#define __HFN_INT_ERR_LPU1     0x00040000U
+#define __HFN_INT_ERR_PSS      0x00080000U
+#define __HFN_INT_MBOX_LPU0    0x00100000U
+#define __HFN_INT_MBOX_LPU1    0x00200000U
+#define __HFN_INT_MBOX1_LPU0   0x00400000U
+#define __HFN_INT_MBOX1_LPU1   0x00800000U
+#define __HFN_INT_LL_HALT      0x01000000U
+#define __HFN_INT_CPE_MASK     0x000000ffU
+#define __HFN_INT_RME_MASK     0x0000ff00U
+#define __HFN_INT_ERR_MASK     \
+       (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
+        __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
+#define __HFN_INT_FN0_MASK     \
+       (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+        __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+        __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
+#define __HFN_INT_FN1_MASK     \
+       (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+        __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+        __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
+
+/*
+ * Host interrupt status defines for catapult-2
+ */
+#define __HFN_INT_MBOX_LPU0_CT2        0x00010000U
+#define __HFN_INT_MBOX_LPU1_CT2        0x00020000U
+#define __HFN_INT_ERR_PSS_CT2  0x00040000U
+#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
+#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
+#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
+#define __HFN_INT_ERR_WGN_CT2  0x00400000U
+#define __HFN_INT_ERR_LEHRX_CT2        0x00800000U
+#define __HFN_INT_ERR_LEHTX_CT2        0x01000000U
+#define __HFN_INT_ERR_MASK_CT2 \
+       (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
+        __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
+        __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
+        __HFN_INT_ERR_LEHTX_CT2)
+#define __HFN_INT_FN0_MASK_CT2 \
+       (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+        __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+        __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
+#define __HFN_INT_FN1_MASK_CT2 \
+       (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+        __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+        __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
+
+/*
+ * asic memory map.
+ */
+#define PSS_SMEM_PAGE_START            0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma)      ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma)            ((_ma) & 0x7fff)
+
+#endif /* __BFI_REG_H__ */
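
The shared-memory paging macros at the end of the new bfi_reg.h split an SMEM address into a 32 KB page number (added to a function's page-0 value) and an offset within that page. A self-contained worked example follows, using PSS_SMEM_PAGE_START (0x8000) as the page-0 value and a hypothetical address 0x12345; the macro bodies are copied verbatim from the header so the sketch stands alone.

/* Copied from bfi_reg.h above so the example compiles on its own. */
#define PSS_SMEM_PAGE_START		0x8000
#define PSS_SMEM_PGNUM(_pg0, _ma)	((_pg0) + ((_ma) >> 15))
#define PSS_SMEM_PGOFF(_ma)		((_ma) & 0x7fff)

/*
 * For the (hypothetical) SMEM address 0x12345:
 *   PSS_SMEM_PGNUM(PSS_SMEM_PAGE_START, 0x12345) == 0x8000 + 2 == 0x8002
 *   PSS_SMEM_PGOFF(0x12345)                      == 0x12345 & 0x7fff == 0x2345
 * i.e. the access lands on the third 32 KB page, 0x2345 bytes in.
 */
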