qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
                        break;
                case ABTS_RECV_24XX:
-                       /* ensure that the ATIO queue is empty */
-                       qlt_24xx_process_atio_queue(vha);
+                       if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+                               /* ensure that the ATIO queue is empty */
+                               qlt_handle_abts_recv(vha, (response_t *)pkt);
+                               break;
+                       } else {
+                               /* fall through */
+                               qlt_24xx_process_atio_queue(vha, 1);
+                       }
                case ABTS_RESP_24XX:
                case CTIO_TYPE7:
                case NOTIFY_ACK_TYPE:
                case INTR_RSP_QUE_UPDATE_83XX:
                        qla24xx_process_response_queue(vha, rsp);
                        break;
-               case INTR_ATIO_QUE_UPDATE:
-                       qlt_24xx_process_atio_queue(vha);
+               case INTR_ATIO_QUE_UPDATE: {
+                       unsigned long flags2;
+                       spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+                       qlt_24xx_process_atio_queue(vha, 1);
+                       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
                        break;
-               case INTR_ATIO_RSP_QUE_UPDATE:
-                       qlt_24xx_process_atio_queue(vha);
+               }
+               case INTR_ATIO_RSP_QUE_UPDATE: {
+                       unsigned long flags2;
+                       spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+                       qlt_24xx_process_atio_queue(vha, 1);
+                       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+
                        qla24xx_process_response_queue(vha, rsp);
                        break;
+               }
                default:
                        ql_dbg(ql_dbg_async, vha, 0x504f,
                            "Unrecognized interrupt type (%d).\n", stat * 0xff);
                case INTR_RSP_QUE_UPDATE_83XX:
                        qla24xx_process_response_queue(vha, rsp);
                        break;
-               case INTR_ATIO_QUE_UPDATE:
-                       qlt_24xx_process_atio_queue(vha);
+               case INTR_ATIO_QUE_UPDATE: {
+                       unsigned long flags2;
+                       spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+                       qlt_24xx_process_atio_queue(vha, 1);
+                       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
                        break;
-               case INTR_ATIO_RSP_QUE_UPDATE:
-                       qlt_24xx_process_atio_queue(vha);
+               }
+               case INTR_ATIO_RSP_QUE_UPDATE: {
+                       unsigned long flags2;
+                       spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+                       qlt_24xx_process_atio_queue(vha, 1);
+                       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+
                        qla24xx_process_response_queue(vha, rsp);
                        break;
+               }
                default:
                        ql_dbg(ql_dbg_async, vha, 0x5051,
                            "Unrecognized interrupt type (%d).\n", stat & 0xff);
 
  */
 /* Predefs for callbacks handed to qla2xxx LLD */
 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
-       struct atio_from_isp *pkt);
+       struct atio_from_isp *pkt, uint8_t ha_locked);
 static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
        int fn, void *iocb, int flags);
 }
 
 static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
-       struct atio_from_isp *atio)
+       struct atio_from_isp *atio, uint8_t ha_locked)
 {
        ql_dbg(ql_dbg_tgt, vha, 0xe072,
                "%s: qla_target(%d): type %x ox_id %04x\n",
                            atio->u.isp24.fcp_hdr.d_id[2]);
                        break;
                }
-               qlt_24xx_atio_pkt(host, atio);
+               qlt_24xx_atio_pkt(host, atio, ha_locked);
                break;
        }
 
                                break;
                        }
                }
-               qlt_24xx_atio_pkt(host, atio);
+               qlt_24xx_atio_pkt(host, atio, ha_locked);
                break;
        }
 
 
        mutex_lock(&vha->vha_tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       while (tgt->irq_cmd_count != 0) {
+       while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                udelay(2);
                spin_lock_irqsave(&ha->hardware_lock, flags);
 /* ha->hardware_lock supposed to be held on entry */
 /* called via callback from qla2xxx */
 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
-       struct atio_from_isp *atio)
+       struct atio_from_isp *atio, uint8_t ha_locked)
 {
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        int rc;
+       unsigned long flags;
 
        if (unlikely(tgt == NULL)) {
                ql_dbg(ql_dbg_io, vha, 0x3064,
         * Otherwise, some commands can get stuck.
         */
 
-       tgt->irq_cmd_count++;
+       tgt->atio_irq_cmd_count++;
 
        switch (atio->u.raw.entry_type) {
        case ATIO_TYPE7:
                            "qla_target(%d): ATIO_TYPE7 "
                            "received with UNKNOWN exchange address, "
                            "sending QUEUE_FULL\n", vha->vp_idx);
+                       if (!ha_locked)
+                               spin_lock_irqsave(&ha->hardware_lock, flags);
                        qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+                       if (!ha_locked)
+                               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                        break;
                }
 
                if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
                        rc = qlt_chk_qfull_thresh_hold(vha, atio);
                        if (rc != 0) {
-                               tgt->irq_cmd_count--;
+                               tgt->atio_irq_cmd_count--;
                                return;
                        }
                        rc = qlt_handle_cmd_for_atio(vha, atio);
                }
                if (unlikely(rc != 0)) {
                        if (rc == -ESRCH) {
+                               if (!ha_locked)
+                                       spin_lock_irqsave(
+                                           &ha->hardware_lock, flags);
+
 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
                                qlt_send_busy(vha, atio, SAM_STAT_BUSY);
 #else
                                qlt_send_term_exchange(vha, NULL, atio, 1);
 #endif
+
+                               if (!ha_locked)
+                                       spin_unlock_irqrestore(
+                                           &ha->hardware_lock, flags);
+
                        } else {
                                if (tgt->tgt_stop) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe059,
                                            "qla_target(%d): Unable to send "
                                            "command to target, sending BUSY "
                                            "status.\n", vha->vp_idx);
+                                       if (!ha_locked)
+                                               spin_lock_irqsave(
+                                                   &ha->hardware_lock, flags);
                                        qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+                                       if (!ha_locked)
+                                               spin_unlock_irqrestore(
+                                                   &ha->hardware_lock, flags);
                                }
                        }
                }
                        break;
                }
                ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+
+               if (!ha_locked)
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
                qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+               if (!ha_locked)
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
                break;
        }
 
                break;
        }
 
-       tgt->irq_cmd_count--;
+       tgt->atio_irq_cmd_count--;
 }
 
 /* ha->hardware_lock supposed to be held on entry */
  * @ha: SCSI driver HA context
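+ * @ha_locked: non-zero when the caller already holds ha->hardware_lock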
  */
 void
-qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
 {
        struct qla_hw_data *ha = vha->hw;
        struct atio_from_isp *pkt;
                pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
                cnt = pkt->u.raw.entry_count;
 
-               qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
+               qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
+                   ha_locked);
 
                for (i = 0; i < cnt; i++) {
                        ha->tgt.atio_ring_index++;
        ha = rsp->hw;
        vha = pci_get_drvdata(ha->pdev);
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
+       spin_lock_irqsave(&ha->tgt.atio_lock, flags);
 
-       qlt_24xx_process_atio_queue(vha);
-       qla24xx_process_response_queue(vha, rsp);
+       qlt_24xx_process_atio_queue(vha, 0);
 
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
 
        return IRQ_HANDLED;
 }
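/*
 * The handlers above take ha->tgt.atio_lock, whose declaration and
 * initialization fall outside this excerpt.  A minimal sketch of what they
 * assume (field placement follows the &ha->tgt.atio_lock usage above; the
 * init site, during target setup before interrupts are enabled, is an
 * assumption):
 *
 *        spinlock_t atio_lock;                       in the tgt sub-struct of qla_hw_data
 *        spin_lock_init(&ha->tgt.atio_lock);         once, during target setup
 */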
 
+static void
+qlt_handle_abts_recv_work(struct work_struct *work)
+{
+       struct qla_tgt_sess_op *op = container_of(work,
+               struct qla_tgt_sess_op, work);
+       scsi_qla_host_t *vha = op->vha;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+
+       if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset)) {
+               kfree(op);      /* nothing else frees the deferred op */
+               return;
+       }
+
+       spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+       qlt_24xx_process_atio_queue(vha, 0);
+       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       kfree(op);
+}
+
+void
+qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
+{
+       struct qla_tgt_sess_op *op;
+
+       op = kzalloc(sizeof(*op), GFP_ATOMIC);
+
+       if (!op) {
+               /* Do not reach for the ATIO queue here; this is best-effort
+                * error recovery at this point.
+                */
+               qlt_response_pkt_all_vps(vha, pkt);
+               return;
+       }
+
+       memcpy(&op->atio, pkt, sizeof(*pkt));
+       op->vha = vha;
+       op->chip_reset = vha->hw->chip_reset;
+       INIT_WORK(&op->work, qlt_handle_abts_recv_work);
+       queue_work(qla_tgt_wq, &op->work);
+}
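/*
 * A minimal sketch of the deferral context used by the two functions above.
 * Only the fields actually touched here are listed; the driver's real
 * struct qla_tgt_sess_op is declared elsewhere and may carry additional
 * members or a different layout.
 */
struct qla_tgt_sess_op_sketch {
        struct scsi_qla_host *vha;      /* adapter the deferred packet belongs to */
        uint32_t chip_reset;            /* snapshot of ha->chip_reset at queue time */
        struct atio_from_isp atio;      /* raw copy of the incoming IOCB */
        struct work_struct work;        /* runs qlt_handle_abts_recv_work() */
};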
+
 int
 qlt_mem_alloc(struct qla_hw_data *ha)
 {