/* Word 12, 13, 14, 15 - is zero */
 }
 
+/**
+ * lpfc_nvmet_get_ctx_for_xri - find an active NVMET receive context by XRI
+ * @phba: pointer to the HBA instance the lookup is performed on.
+ * @xri: SLI-4 exchange resource indicator (sli4_xritag) to match.
+ *
+ * Walks the driver's t_active_ctx_list under t_active_list_lock and
+ * returns the first context whose buffer's sglq sli4_xritag equals @xri,
+ * or NULL if no active context matches.
+ *
+ * NOTE(review): the matched ctxp is returned after the list lock has been
+ * dropped, so nothing here pins the context; presumably callers rely on
+ * other serialization to keep it valid — confirm against call sites.
+ */
+struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
+{
+       struct lpfc_nvmet_rcv_ctx *ctxp;
+       unsigned long iflag;
+       bool found = false;
+
+       spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+       list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+               if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+                       continue;
+
+               found = true;
+               break;
+       }
+       spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+       /* ctxp is only a valid cursor when the loop matched an entry */
+       if (found)
+               return ctxp;
+
+       return NULL;
+}
+
+/**
+ * lpfc_nvmet_get_ctx_for_oxid - find an active NVMET receive context by
+ * originator exchange id and source id
+ * @phba: pointer to the HBA instance the lookup is performed on.
+ * @oxid: originator exchange ID from the FC frame header to match.
+ * @sid: source N_Port ID from the FC frame header to match.
+ *
+ * Walks the driver's t_active_ctx_list under t_active_list_lock and
+ * returns the first context whose oxid AND sid both match, or NULL if
+ * no active context matches.
+ *
+ * NOTE(review): as with lpfc_nvmet_get_ctx_for_xri, the ctxp is returned
+ * after the list lock is released; callers presumably provide their own
+ * lifetime guarantees — confirm against call sites.
+ */
+struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
+{
+       struct lpfc_nvmet_rcv_ctx *ctxp;
+       unsigned long iflag;
+       bool found = false;
+
+       spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+       list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+               if (ctxp->oxid != oxid || ctxp->sid != sid)
+                       continue;
+
+               found = true;
+               break;
+       }
+       spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+       /* ctxp is only a valid cursor when the loop matched an entry */
+       if (found)
+               return ctxp;
+
+       return NULL;
+}
+
 static void
 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
 {
        lockdep_assert_held(&ctxp->ctxlock);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6313 NVMET Defer ctx release xri x%x flg x%x\n",
+                       "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
                        ctxp->oxid, ctxp->flag);
 
        if (ctxp->flag & LPFC_NVMET_CTX_RLS)
                return;
 
        ctxp->flag |= LPFC_NVMET_CTX_RLS;
+       spin_lock(&phba->sli4_hba.t_active_list_lock);
+       list_del(&ctxp->list);
+       spin_unlock(&phba->sli4_hba.t_active_list_lock);
        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 #endif
                atomic_inc(&tgtp->rcv_fcp_cmd_in);
 
-               /*  flag new work queued, replacement buffer has already
-                *  been reposted
-                */
+               /* Indicate that a replacement buffer has been posted */
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
         * Use the CPU context list, from the MRQ the IO was received on
         * (ctxp->idx), to save context structure.
         */
+       spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+       list_del_init(&ctxp->list);
+       spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
        cpu = raw_smp_processor_id();
        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
                }
 
                lpfc_printf_log(phba, KERN_INFO, logerr,
-                               "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
-                               ctxp->oxid, status, result, ctxp->flag);
+                               "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
+                               "XBUSY:x%x\n",
+                               ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
+                               status, result, ctxp->flag);
 
        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
            (ctxp->state == LPFC_NVMET_STE_ABORT)) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                               "6102 IO xri x%x aborted\n",
+                               "6102 IO oxid x%x aborted\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
                ctxp->hdwq = &phba->sli4_hba.hdwq[0];
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
+                       "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
                        ctxp->oxid, ctxp->flag, ctxp->state);
 
        lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
        /* Since iaab/iaar are NOT set, we need to check
         * if the firmware is in process of aborting IO
         */
-       if (ctxp->flag & LPFC_NVMET_XBUSY) {
+       if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                return;
        }
                         ctxp->state, aborting);
 
        atomic_inc(&lpfc_nvmep->xmt_fcp_release);
+       ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
 
        if (aborting)
                return;
 
        if (!nvmebuf) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
-                               "6425 Defer rcv: no buffer xri x%x: "
+                               "6425 Defer rcv: no buffer oxid x%x: "
                                "flg %x ste %x\n",
                                ctxp->oxid, ctxp->flag, ctxp->state);
                return;
        uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
        struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
+       struct nvmefc_tgt_fcp_req *req = NULL;
        struct lpfc_nodelist *ndlp;
        unsigned long iflag = 0;
        int rrq_empty = 0;
                 */
                if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
                    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
-                       list_del(&ctxp->list);
+                       list_del_init(&ctxp->list);
                        released = true;
                }
                ctxp->flag &= ~LPFC_NVMET_XBUSY;
                }
 
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                               "6318 XB aborted oxid %x flg x%x (%x)\n",
+                               "6318 XB aborted oxid x%x flg x%x (%x)\n",
                                ctxp->oxid, ctxp->flag, released);
                if (released)
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
        }
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+       ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
+       if (ctxp) {
+               /*
+                *  Abort already done by FW, so BA_ACC sent.
+                *  However, the transport may be unaware.
+                */
+               lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                               "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
+                               "flag x%x oxid x%x rxid x%x\n",
+                               xri, ctxp->state, ctxp->flag, ctxp->oxid,
+                               rxid);
+
+               spin_lock_irqsave(&ctxp->ctxlock, iflag);
+               ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+               ctxp->state = LPFC_NVMET_STE_ABORT;
+               spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+               lpfc_nvmeio_data(phba,
+                                "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+                                xri, smp_processor_id(), 0);
+
+               req = &ctxp->ctx.fcp_req;
+               if (req)
+                       nvmet_fc_rcv_fcp_abort(phba->targetport, req);
+       }
 #endif
 }
 
        struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
        struct nvmefc_tgt_fcp_req *rsp;
        uint32_t sid;
-       uint16_t xri;
+       uint16_t oxid, xri;
        unsigned long iflag = 0;
 
-       xri = be16_to_cpu(fc_hdr->fh_ox_id);
        sid = sli4_sid_from_fc_hdr(fc_hdr);
+       oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 
        spin_lock_irqsave(&phba->hbalock, iflag);
        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_for_each_entry_safe(ctxp, next_ctxp,
                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 list) {
-               if (ctxp->oxid != xri || ctxp->sid != sid)
+               if (ctxp->oxid != oxid || ctxp->sid != sid)
                        continue;
 
                xri = ctxp->ctxbuf->sglq->sli4_xritag;
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        spin_unlock_irqrestore(&phba->hbalock, iflag);
 
-       lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-                        xri, raw_smp_processor_id(), 1);
+       /* check the wait list */
+       if (phba->sli4_hba.nvmet_io_wait_cnt) {
+               struct rqb_dmabuf *nvmebuf;
+               struct fc_frame_header *fc_hdr_tmp;
+               u32 sid_tmp;
+               u16 oxid_tmp;
+               bool found = false;
+
+               spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+
+               /* match by oxid and s_id */
+               list_for_each_entry(nvmebuf,
+                                   &phba->sli4_hba.lpfc_nvmet_io_wait_list,
+                                   hbuf.list) {
+                       fc_hdr_tmp = (struct fc_frame_header *)
+                                       (nvmebuf->hbuf.virt);
+                       oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
+                       sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
+                       if (oxid_tmp != oxid || sid_tmp != sid)
+                               continue;
+
+                       lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                                       "6321 NVMET Rcv ABTS oxid x%x from x%x "
+                                       "is waiting for a ctxp\n",
+                                       oxid, sid);
+
+                       list_del_init(&nvmebuf->hbuf.list);
+                       phba->sli4_hba.nvmet_io_wait_cnt--;
+                       found = true;
+                       break;
+               }
+               spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+                                      iflag);
+
+               /* free buffer since already posted a new DMA buffer to RQ */
+               if (found) {
+                       nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+                       /* Respond with BA_ACC accordingly */
+                       lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+                       return 0;
+               }
+       }
+
+       /* check active list */
+       ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
+       if (ctxp) {
+               xri = ctxp->ctxbuf->sglq->sli4_xritag;
+
+               spin_lock_irqsave(&ctxp->ctxlock, iflag);
+               ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
+               spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+               lpfc_nvmeio_data(phba,
+                                "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+                                xri, raw_smp_processor_id(), 0);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                               "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
+                               "flag x%x state x%x\n",
+                               ctxp->oxid, xri, ctxp->flag, ctxp->state);
+
+               if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
+                       /* Notify the transport */
+                       nvmet_fc_rcv_fcp_abort(phba->targetport,
+                                              &ctxp->ctx.fcp_req);
+               } else {
+                       spin_lock_irqsave(&ctxp->ctxlock, iflag);
+                       lpfc_nvmet_defer_release(phba, ctxp);
+                       spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+               }
+               if (ctxp->state == LPFC_NVMET_STE_RCV)
+                       lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+                                                        ctxp->oxid);
+               else
+                       lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+                                                      ctxp->oxid);
+
+               lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
+               return 0;
+       }
+
+       lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
+                        oxid, raw_smp_processor_id(), 1);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6320 NVMET Rcv ABTS:rjt xid x%x\n", xri);
+                       "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
 
        /* Respond with BA_RJT accordingly */
        lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
                        return;
                }
+               if (rc == WQE_SUCCESS) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+                       if (ctxp->ts_cmd_nvme) {
+                               if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
+                                       ctxp->ts_status_wqput = ktime_get_ns();
+                               else
+                                       ctxp->ts_data_wqput = ktime_get_ns();
+                       }
+#endif
+               } else {
+                       WARN_ON(rc);
+               }
        }
        wq->q_flag &= ~HBA_NVMET_WQFULL;
        spin_unlock_irqrestore(&pring->ring_lock, iflags);
                return;
        }
 
+       if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+                               "6324 IO oxid x%x aborted\n",
+                               ctxp->oxid);
+               return;
+       }
+
        payload = (uint32_t *)(nvmebuf->dbuf.virt);
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+       ctxp->flag |= LPFC_NVMET_TNOTIFY;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (ctxp->ts_isr_cmd)
                ctxp->ts_cmd_nvme = ktime_get_ns();
                        phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
                return;
        }
+       ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
        sid = sli4_sid_from_fc_hdr(fc_hdr);
 
        ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+       spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+       list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
+       spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
        if (ctxp->state != LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6414 NVMET Context corrupt %d %d oxid x%x\n",
        if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
            !(ctxp->flag & LPFC_NVMET_XBUSY)) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-               list_del(&ctxp->list);
+               list_del_init(&ctxp->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                released = true;
        }
        atomic_inc(&tgtp->xmt_abort_rsp);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6165 ABORT cmpl: xri x%x flg x%x (%d) "
+                       "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
                        "WCQE: %08x %08x %08x %08x\n",
                        ctxp->oxid, ctxp->flag, released,
                        wcqe->word0, wcqe->total_data_placed,
        if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
            !(ctxp->flag & LPFC_NVMET_XBUSY)) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-               list_del(&ctxp->list);
+               list_del_init(&ctxp->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                released = true;
        }
        atomic_inc(&tgtp->xmt_abort_rsp);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6316 ABTS cmpl xri x%x flg x%x (%x) "
+                       "6316 ABTS cmpl oxid x%x flg x%x (%x) "
                        "WCQE: %08x %08x %08x %08x\n",
                        ctxp->oxid, ctxp->flag, released,
                        wcqe->word0, wcqe->total_data_placed,
        spin_lock_irqsave(&ctxp->ctxlock, flags);
        if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-               list_del(&ctxp->list);
+               list_del_init(&ctxp->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                released = true;
        }
 
        atomic_inc(&tgtp->xmt_abort_rsp_error);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
-                       "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
-                       ctxp->oxid, rc);
+                       "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
+                       "(%x)\n",
+                       ctxp->oxid, rc, released);
        if (released)
                lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
        return 1;