 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
 #define bfa_ioc_notify_fail(__ioc)                     \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc)               \
+                       ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
 #define bfa_ioc_sync_join(__ioc)                       \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
 #define bfa_ioc_sync_leave(__ioc)                      \
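
For context, the bfa_ioc_sync_*() macros dispatch through the per-ASIC ops table. A minimal sketch of the sync-related members of struct bfa_ioc_hwif, with ioc_sync_start being the hook this patch adds (member order and the surrounding declaration in bfa_ioc.h are assumed):

struct bfa_ioc;

struct bfa_ioc_hwif {
	bool	(*ioc_sync_start)(struct bfa_ioc *ioc);	/* new: load-time check */
	void	(*ioc_sync_join)(struct bfa_ioc *ioc);
	void	(*ioc_sync_leave)(struct bfa_ioc *ioc);
	void	(*ioc_sync_ack)(struct bfa_ioc *ioc);
	bool	(*ioc_sync_complete)(struct bfa_ioc *ioc);
	/* ... ioc_map_port, ioc_notify_fail and the other per-ASIC hooks ... */
};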
        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
-                       if (bfa_ioc_sync_complete(ioc)) {
+                       if (bfa_ioc_sync_start(ioc)) {
                                iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
  * execution context (driver/bios) must match.
  */
 static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
 {
        struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
 
        if (fwhdr.signature != drv_fwhdr->signature)
                return false;
 
-       if (fwhdr.exec != drv_fwhdr->exec)
+       if (swab32(fwhdr.param) != boot_env)
                return false;
 
        return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
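
The validity check now compares the boot environment recorded in the running image's header instead of the exec vector. A sketch of the header fields involved, assuming the usual struct bfi_ioc_image_hdr layout from bfi.h (padding and trailing fields elided):

struct bfi_ioc_image_hdr {
	u32	signature;	/* constant firmware signature */
	u32	exec;		/* exec vector; the old check compared this */
	u32	param;		/* boot loader/env word, stored byte-swapped in
				 * smem, hence the swab32() before comparing
				 * against boot_env */
	/* ... version words, md5sum, etc. ... */
};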
 {
        enum bfi_ioc_state ioc_fwstate;
        bool fwvalid;
+       u32 boot_env;
 
        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
+       boot_env = BFI_BOOT_LOADER_OS;
+
        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;
 
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
-               false : bfa_ioc_fwver_valid(ioc);
+               false : bfa_ioc_fwver_valid(ioc, boot_env);
 
        if (!fwvalid) {
-               bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+               bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
                return;
        }
 
        /**
         * Initialize the h/w for any other states.
         */
-       bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+       bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
 }
 
 void
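
boot_env is pinned to BFI_BOOT_LOADER_OS because the network driver always boots firmware for the host-OS execution context; a pre-OS loader would record a different value in the header, which the check above would then reject. A sketch of the assumed encoding (the OS value matches the code above; the other values are illustrative, the real constants live in bfi.h):

enum bfi_boot_loader {
	BFI_BOOT_LOADER_OS	= 0,	/* host OS driver: the only env bna uses */
	BFI_BOOT_LOADER_BIOS	= 1,	/* assumed */
	BFI_BOOT_LOADER_UEFI	= 2,	/* assumed */
};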
  */
 static void
 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
-                   u32 boot_param)
+                   u32 boot_env)
 {
        u32 *fwimg;
        u32 pgnum, pgoff;
        /*
         * Set boot type and boot param at the end.
        */
-       writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+       writel(boot_type, ((ioc->ioc_regs.smem_page_start)
                        + (BFI_BOOT_TYPE_OFF)));
-       writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
-                       + (BFI_BOOT_PARAM_OFF)));
+       writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+                       + (BFI_BOOT_LOADER_OFF)));
 }
 
 static void
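
Two things change in the smem trailer writes: the second word now carries the boot environment under the renamed BFI_BOOT_LOADER_OFF, and the swab32(swab32(x)) wrappers are dropped; since a double byte swap is the identity, writel(x) is behavior-preserving and simply states the intent. A sketch of the assumed trailer offsets (values are illustrative; the real ones are defined in bfi.h):

#define BFI_BOOT_TYPE_OFF	8	/* assumed: boot type word */
#define BFI_BOOT_LOADER_OFF	12	/* assumed: boot env word, previously
					 * named BFI_BOOT_PARAM_OFF */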
  * as the entry vector.
  */
 static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
 {
        void __iomem *rb;
 
         * Initialize IOC state of all functions on a chip reset.
         */
        rb = ioc->pcidev.pci_bar_kva;
-       if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+       if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
                writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
                writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
        } else {
        }
 
        bfa_ioc_msgflush(ioc);
-       bfa_ioc_download_fw(ioc, boot_type, boot_param);
+       bfa_ioc_download_fw(ioc, boot_type, boot_env);
 
        /**
         * Enable interrupts just before starting LPU
 
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
 static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
        nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
        nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
        nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+       nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
        nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
        nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
        nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
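
These member-by-member assignments sit in the CT attach path. A sketch of the surrounding function, assuming the bna entry point bfa_nw_ioc_set_ct_hwif() and an abbreviated body:

void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
	/* ... the remaining hooks, assigned as in the hunk above ... */

	ioc->ioc_hwif = &nw_hwif_ct;	/* select the CT-specific ops */
}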
        bfa_nw_ioc_hw_sem_release(ioc);
 }
 
+/**
+ * Synchronized IOC failure processing routines
+ */
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+       u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+       u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+       /*
+        * Driver load time.  If the sync required bit for this PCI fn
+        * is set, it is due to an unclean exit by the driver for this
+        * PCI fn in the previous incarnation. Whoever comes here first
+        * should clean it up, no matter which PCI fn.
+        */
+
+       if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+               writel(0, ioc->ioc_regs.ioc_fail_sync);
+               writel(1, ioc->ioc_regs.ioc_usage_reg);
+               writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+               writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+               return true;
+       }
+
+       return bfa_ioc_ct_sync_complete(ioc);
+}
+
 /**
  * Synchronized IOC failure processing routines
  */
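
The helpers used by bfa_ioc_ct_sync_start() treat ioc_fail_sync as two per-PCI-function bitmasks. A sketch of the assumed encoding (shift value and bit positions are assumptions; the real macros live in bfa_ioc_ct.c):

/* low halfword: "sync acked" bits; high halfword: "sync required" bits */
#define BFA_IOC_SYNC_REQD_SH	16	/* assumed split point */

/* one bit per PCI function, indexed by the function number */
#define bfa_ioc_ct_sync_pos(__ioc)	((u32)(1 << bfa_ioc_pcifn(__ioc)))

/* extract the "sync required" halfword */
#define bfa_ioc_ct_get_sync_reqd(__val)	((__val) >> BFA_IOC_SYNC_REQD_SH)

Under that reading, a set "required" bit for this function at load time means the previous driver instance exited without leaving the sync group, so sync_start clears the whole register and resets both functions' fw state before a fresh boot.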