ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
            __func__, bsg_job);
+
+       if (qla2x00_isp_reg_stat(ha)) {
+               ql_log(ql_log_info, vha, 0x9007,
+                   "PCI/Register disconnect.\n");
+               qla_pci_set_eeh_busy(vha);
+       }
+
        /* find the bsg job from the active list of commands */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (que = 0; que < ha->max_req_queues; que++) {
                            sp->u.bsg_job == bsg_job) {
                                req->outstanding_cmds[cnt] = NULL;
                                spin_unlock_irqrestore(&ha->hardware_lock, flags);
-                               if (ha->isp_ops->abort_command(sp)) {
+
+                               if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
                                        ql_log(ql_log_warn, vha, 0x7089,
                                            "mbx abort_command failed.\n");
                                        bsg_reply->result = -EIO;
 
                uint32_t        n2n_fw_acc_sec:1;
                uint32_t        plogi_template_valid:1;
                uint32_t        port_isolated:1;
+               uint32_t        eeh_flush:2;
+#define EEH_FLUSH_RDY  1
+#define EEH_FLUSH_DONE 2
        } flags;
 
        uint16_t max_exchg;
        uint32_t                rsp_que_len;
        uint32_t                req_que_off;
        uint32_t                rsp_que_off;
+       unsigned long           eeh_jif;
 
        /* Multi queue data structs */
        device_reg_t *mqiobase;
 
 {
        srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
        struct srb_iocb *iocb;
+       scsi_qla_host_t *vha = sp->vha;
 
        WARN_ON(irqs_disabled());
        iocb = &sp->u.iocb_cmd;
 
        /* ref: TMR */
        kref_put(&sp->cmd_kref, qla2x00_sp_release);
+
+       if (vha && qla2x00_isp_reg_stat(vha->hw)) {
+               ql_log(ql_log_info, vha, 0x9008,
+                   "PCI/Register disconnect.\n");
+               qla_pci_set_eeh_busy(vha);
+       }
 }
 
 void qla2x00_sp_free(srb_t *sp)
 
        vha->hw->flags.port_isolated = 1;
 
+       if (qla2x00_isp_reg_stat(vha->hw)) {
+               ql_log(ql_log_info, vha, 0x9006,
+                   "PCI/Register disconnect, exiting.\n");
+               qla_pci_set_eeh_busy(vha);
+               return FAILED;
+       }
        if (qla2x00_chip_is_down(vha))
                return 0;
 
 {
        scsi_qla_host_t *vha = shost_priv(host);
 
+       if (qla2x00_isp_reg_stat(vha->hw)) {
+               ql_log(ql_log_info, vha, 0x9001,
+                   "PCI/Register disconnect, exiting.\n");
+               qla_pci_set_eeh_busy(vha);
+               return FAILED;
+       }
+
        vha->hw->flags.port_isolated = 0;
        /* Set the flag to 1, so that isp_abort can proceed */
        vha->flags.online = 1;
 
                 "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
 
 
+/*
+ * Grace period, in seconds, before the driver begins winding down the
+ * adapter on its own after a PCI error is detected (see
+ * qla_wind_down_chip()).  Tunable at runtime via sysfs (mode 0644).
+ */
+u32 ql2xdelay_before_pci_error_handling = 5;
+module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
+MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
+       "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
+
 static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
 static int qla2xxx_map_queues(struct Scsi_Host *shost);
        }
 }
 
+/*
+ * qla_wind_down_chip - best-effort adapter shutdown after a PCI error.
+ *
+ * Invoked from the driver timer while ha->flags.eeh_busy is set.  If the
+ * OS-level PCI error recovery has not engaged (ha->pci_error_state clear),
+ * wind the chip down in two timed stages:
+ *
+ *   Stage 1 - after ql2xdelay_before_pci_error_handling seconds from the
+ *             error timestamp (ha->eeh_jif): reset the chip, disable
+ *             interrupts, mark EEH_FLUSH_RDY and restamp eeh_jif.
+ *   Stage 2 - 5 seconds after stage 1: stop bus mastering (DMA) and abort
+ *             all outstanding I/O, then mark EEH_FLUSH_DONE.
+ *
+ * @vha: host adapter context
+ */
+static void qla_wind_down_chip(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       /* Nothing to do unless an EEH/PCI error is pending. */
+       if (!ha->flags.eeh_busy)
+               return;
+       if (ha->pci_error_state)
+               /* system is trying to recover */
+               return;
+
+       /*
+        * Current system is not handling PCIE error.  At this point, this is
+        * best effort to wind down the adapter.
+        */
+       if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
+           !ha->flags.eeh_flush) {
+               /* Stage 1: quiesce the hardware and start the flush window. */
+               ql_log(ql_log_info, vha, 0x9009,
+                   "PCI Error detected, attempting to reset hardware.\n");
+
+               ha->isp_ops->reset_chip(vha);
+               ha->isp_ops->disable_intrs(ha);
+
+               ha->flags.eeh_flush = EEH_FLUSH_RDY;
+               /* Restamp so stage 2 runs a fixed 5 s after stage 1. */
+               ha->eeh_jif = jiffies;
+
+       } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
+           time_after_eq(jiffies, ha->eeh_jif +  5 * HZ)) {
+               /* Stage 2: stop DMA before tearing down outstanding I/O. */
+               pci_clear_master(ha->pdev);
+
+               /* flush all command */
+               qla2x00_abort_isp_cleanup(vha);
+               ha->flags.eeh_flush = EEH_FLUSH_DONE;
+
+               ql_log(ql_log_info, vha, 0x900a,
+                   "PCI Error handling complete, all IOs aborted.\n");
+       }
+}
+
 /**************************************************************************
 *   qla2x00_timer
 *
        fc_port_t *fcport = NULL;
 
        if (ha->flags.eeh_busy) {
+               qla_wind_down_chip(vha);
+
                ql_dbg(ql_dbg_timer, vha, 0x6000,
                    "EEH = %d, restarting timer.\n",
                    ha->flags.eeh_busy);
 
        spin_lock_irqsave(&base_vha->work_lock, flags);
        if (!ha->flags.eeh_busy) {
+               ha->eeh_jif = jiffies;
+               ha->flags.eeh_flush = 0;
+
                ha->flags.eeh_busy = 1;
                do_cleanup = true;
        }