enum nvmet_fcp_datadir          io_dir;
        bool                            active;
        bool                            abort;
+       bool                            aborted;
+       bool                            writedataactive;
        spinlock_t                      flock;
 
        struct nvmet_req                req;
                fod->tgtport = tgtport;
                fod->queue = queue;
                fod->active = false;
+               fod->abort = false;
+               fod->aborted = false;
+               fod->fcpreq = NULL;
                list_add_tail(&fod->fcp_list, &queue->fod_list);
                spin_lock_init(&fod->flock);
 
        if (fod) {
                list_del(&fod->fcp_list);
                fod->active = true;
-               fod->abort = false;
                /*
                 * no queue reference is taken, as it was taken by the
                 * queue lookup just prior to the allocation. The iod
        struct nvmet_fc_tgtport *tgtport = fod->tgtport;
        unsigned long flags;
 
+       fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+                               sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+
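+       /* clear lldd back-pointer so a late ABTS for this i/o is ignored */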
+       fcpreq->nvmet_fc_private = NULL;
+
        spin_lock_irqsave(&queue->qlock, flags);
        list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
        fod->active = false;
+       fod->abort = false;
+       fod->aborted = false;
+       fod->writedataactive = false;
+       fod->fcpreq = NULL;
        spin_unlock_irqrestore(&queue->qlock, flags);
 
        /*
 }
 
 
-static void
-nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
-                               struct nvmefc_tgt_fcp_req *fcpreq)
-{
-       int ret;
-
-       fcpreq->op = NVMET_FCOP_ABORT;
-       fcpreq->offset = 0;
-       fcpreq->timeout = 0;
-       fcpreq->transfer_length = 0;
-       fcpreq->transferred_length = 0;
-       fcpreq->fcp_error = 0;
-       fcpreq->sg_cnt = 0;
-
-       ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
-       if (ret)
-               /* should never reach here !! */
-               WARN_ON(1);
-}
-
-
 static void
 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 {
+       struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
        struct nvmet_fc_fcp_iod *fod = queue->fod;
        unsigned long flags;
-       int i;
+       int i, writedataactive;
        bool disconnect;
 
        disconnect = atomic_xchg(&queue->connected, 0);
                if (fod->active) {
                        spin_lock(&fod->flock);
                        fod->abort = true;
+                       writedataactive = fod->writedataactive;
                        spin_unlock(&fod->flock);
+                       /*
+                        * only call the lldd abort routine if waiting for
+                        * writedata; other outstanding ops should finish
+                        * on their own.
+                        */
+                       if (writedataactive) {
+                               spin_lock(&fod->flock);
+                               fod->aborted = true;
+                               spin_unlock(&fod->flock);
+                               tgtport->ops->fcp_abort(
+                                       &tgtport->fc_target_port, fod->fcpreq);
+                       }
                }
        }
        spin_unlock_irqrestore(&queue->qlock, flags);
        int ret, idx;
 
        if (!template->xmt_ls_rsp || !template->fcp_op ||
+           !template->fcp_abort ||
            !template->fcp_req_release || !template->targetport_delete ||
            !template->max_hw_queues || !template->max_sgl_segments ||
            !template->max_dif_sgl_segments || !template->dma_boundary) {
 
 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
 
+static void
+nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
+                               struct nvmet_fc_fcp_iod *fod)
+{
+       struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+
+       /* data no longer needed */
+       nvmet_fc_free_tgt_pgs(fod);
+
+       /*
+        * if an ABTS was received or we issued the fcp_abort early,
+        * don't call the abort routine again.
+        */
+       /* no need to take lock - lock was taken earlier to get here */
+       if (!fod->aborted)
+               tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
+
+       nvmet_fc_free_fcp_iod(fod->queue, fod);
+}
+
 static void
 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
                                struct nvmet_fc_fcp_iod *fod)
 
        ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
        if (ret)
-               nvmet_fc_abort_op(tgtport, fod->fcpreq);
+               nvmet_fc_abort_op(tgtport, fod);
 }
 
 static void
 {
        struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
        struct scatterlist *sg, *datasg;
+       unsigned long flags;
        u32 tlen, sg_off;
        int ret;
 
                 */
                fod->abort = true;
 
-               if (op == NVMET_FCOP_WRITEDATA)
+               if (op == NVMET_FCOP_WRITEDATA) {
+                       spin_lock_irqsave(&fod->flock, flags);
+                       fod->writedataactive = false;
+                       spin_unlock_irqrestore(&fod->flock, flags);
                        nvmet_req_complete(&fod->req,
                                        NVME_SC_FC_TRANSPORT_ERROR);
-               else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
+               } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
                        fcpreq->fcp_error = ret;
                        fcpreq->transferred_length = 0;
                        nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
        }
 }
 
+static inline bool
+__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
+{
+       struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+       struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+       /* if in the middle of an io and we need to tear down */
+       if (abort) {
+               if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
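+                       /*
+                        * completing the nvmet req lets its done handler
+                        * see fod->abort and tear down the exchange
+                        */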
+                       nvmet_req_complete(&fod->req,
+                                       NVME_SC_FC_TRANSPORT_ERROR);
+                       return true;
+               }
+
+               nvmet_fc_abort_op(tgtport, fod);
+               return true;
+       }
+
+       return false;
+}
+
 /*
  * actual done handler for FCP operations when completed by the lldd
  */
 
        spin_lock_irqsave(&fod->flock, flags);
        abort = fod->abort;
+       fod->writedataactive = false;
        spin_unlock_irqrestore(&fod->flock, flags);
 
-       /* if in the middle of an io and we need to tear down */
-       if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
-               /* data no longer needed */
-               nvmet_fc_free_tgt_pgs(fod);
-
-               nvmet_req_complete(&fod->req, fcpreq->fcp_error);
-               return;
-       }
-
        switch (fcpreq->op) {
 
        case NVMET_FCOP_WRITEDATA:
+               if (__nvmet_fc_fod_op_abort(fod, abort))
+                       return;
                if (fcpreq->fcp_error ||
                    fcpreq->transferred_length != fcpreq->transfer_length) {
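+                       /*
+                        * mark aborted so the nvme completion path
+                        * aborts the exchange instead of sending a rsp
+                        */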
+                       spin_lock(&fod->flock);
+                       fod->abort = true;
+                       spin_unlock(&fod->flock);
+
                        nvmet_req_complete(&fod->req,
                                        NVME_SC_FC_TRANSPORT_ERROR);
                        return;
 
                fod->offset += fcpreq->transferred_length;
                if (fod->offset != fod->total_length) {
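+                       /*
+                        * another WRITEDATA op will be outstanding; queue
+                        * teardown checks writedataactive to know it must
+                        * abort the op via fcp_abort
+                        */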
+                       spin_lock_irqsave(&fod->flock, flags);
+                       fod->writedataactive = true;
+                       spin_unlock_irqrestore(&fod->flock, flags);
+
                        /* transfer the next chunk */
                        nvmet_fc_transfer_fcp_data(tgtport, fod,
                                                NVMET_FCOP_WRITEDATA);
 
        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
+               if (__nvmet_fc_fod_op_abort(fod, abort))
+                       return;
                if (fcpreq->fcp_error ||
                    fcpreq->transferred_length != fcpreq->transfer_length) {
-                       /* data no longer needed */
-                       nvmet_fc_free_tgt_pgs(fod);
-
-                       nvmet_fc_abort_op(tgtport, fod->fcpreq);
+                       nvmet_fc_abort_op(tgtport, fod);
                        return;
                }
 
                if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
                        /* data no longer needed */
                        nvmet_fc_free_tgt_pgs(fod);
-                       fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
-                                       sizeof(fod->rspiubuf), DMA_TO_DEVICE);
                        nvmet_fc_free_fcp_iod(fod->queue, fod);
                        return;
                }
                break;
 
        case NVMET_FCOP_RSP:
-       case NVMET_FCOP_ABORT:
-               fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
-                               sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+               if (__nvmet_fc_fod_op_abort(fod, abort))
+                       return;
                nvmet_fc_free_fcp_iod(fod->queue, fod);
                break;
 
        default:
-               nvmet_fc_free_tgt_pgs(fod);
-               nvmet_fc_abort_op(tgtport, fod->fcpreq);
                break;
        }
 }
                fod->queue->sqhd = cqe->sq_head;
 
        if (abort) {
-               /* data no longer needed */
-               nvmet_fc_free_tgt_pgs(fod);
-
-               nvmet_fc_abort_op(tgtport, fod->fcpreq);
+               nvmet_fc_abort_op(tgtport, fod);
                return;
        }
 
                                &fod->queue->nvme_cq,
                                &fod->queue->nvme_sq,
                                &nvmet_fc_tgt_fcp_ops);
-       if (!ret) {     /* bad SQE content */
-               nvmet_fc_abort_op(tgtport, fod->fcpreq);
+       if (!ret) {     /* bad SQE content or invalid ctrl state */
+               nvmet_fc_abort_op(tgtport, fod);
                return;
        }
 
        return;
 
 transport_error:
-       nvmet_fc_abort_op(tgtport, fod->fcpreq);
+       nvmet_fc_abort_op(tgtport, fod);
 }
 
 /*
                        (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
                return -EIO;
 
-
        queue = nvmet_fc_find_target_queue(tgtport,
                                be64_to_cpu(cmdiu->connection_id));
        if (!queue)
 }
 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
 
+/**
+ * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
+ *                       upon the reception of an ABTS for a FCP command
+ *
+ * Notify the transport that an ABTS has been received for a FCP command
+ * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
+ * LLDD believes the command is still being worked on
+ * (template_ops->fcp_req_release() has not been called).
+ *
+ * The transport will wait for any outstanding work (an op to the LLDD,
+ * which the lldd should complete with error due to the ABTS; or the
+ * completion from the nvmet layer of the nvme command), then will
+ * stop processing and call the fcp_req_release() callback to
+ * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
+ * to the ABTS either after return from this function (assuming any
+ * outstanding op work has been terminated) or upon the callback being
+ * called.
+ *
+ * @target_port: pointer to the (registered) target port the FCP CMD IU
+ *              was received on.
+ * @fcpreq:     pointer to the fcpreq request structure that corresponds
+ *              to the exchange that received the ABTS.
+ */
+void
+nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
+                       struct nvmefc_tgt_fcp_req *fcpreq)
+{
+       struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+       struct nvmet_fc_tgt_queue *queue;
+       unsigned long flags;
+
+       if (!fod || fod->fcpreq != fcpreq)
+               /* job appears to have already completed, ignore abort */
+               return;
+
+       queue = fod->queue;
+
+       spin_lock_irqsave(&queue->qlock, flags);
+       if (fod->active) {
+               /*
+                * mark as abort. The abort handler, invoked upon completion
+                * of any work, will detect the aborted status and do the
+                * callback.
+                */
+               spin_lock(&fod->flock);
+               fod->abort = true;
+               fod->aborted = true;
+               spin_unlock(&fod->flock);
+       }
+       spin_unlock_irqrestore(&queue->qlock, flags);
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
+
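For illustration, a minimal sketch of how an LLDD's ABTS handler might feed
this entry point; the my_lport/my_exchange types and the lookup and BA_ACC
helpers are hypothetical, not part of this patch:

static void
my_lldd_handle_abts(struct my_lport *lport, u16 oxid)
{
        /* hypothetical lookup of the exchange the ABTS refers to */
        struct my_exchange *xchg = my_find_exchange(lport, oxid);

        if (!xchg)
                return;

        /*
         * hand the abort to nvmet-fc; it lets any outstanding op finish
         * (or fail with error) and then calls fcp_req_release() to
         * return the i/o context
         */
        nvmet_fc_rcv_fcp_abort(lport->targetport, &xchg->tgt_fcp_req);

        /* the BA_ACC may be sent now or once fcp_req_release() is seen */
        my_send_ba_acc(lport, oxid);
}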
 enum {
        FCT_TRADDR_ERR          = 0,
        FCT_TRADDR_WWNN         = 1 << 0,
 
 struct fcloop_fcpreq {
        struct fcloop_tport             *tport;
        struct nvmefc_fcp_req           *fcpreq;
+       spinlock_t                      reqlock;
        u16                             status;
+       bool                            active;
+       bool                            aborted;
        struct work_struct              work;
        struct nvmefc_tgt_fcp_req       tgt_fcp_req;
 };
 struct fcloop_ini_fcpreq {
        struct nvmefc_fcp_req           *fcpreq;
        struct fcloop_fcpreq            *tfcp_req;
+       struct work_struct              iniwork;
 };
 
 static inline struct fcloop_lsreq *
 }
 
 /*
- * FCP IO operation done. call back up initiator "done" flows.
+ * FCP IO operation done by initiator abort.
+ * call back up initiator "done" flows.
+ */
+static void
+fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+{
+       struct fcloop_ini_fcpreq *inireq =
+               container_of(work, struct fcloop_ini_fcpreq, iniwork);
+
+       inireq->fcpreq->done(inireq->fcpreq);
+}
+
+/*
+ * FCP IO operation done by target completion.
+ * call back up initiator "done" flows.
  */
 static void
 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, work);
        struct fcloop_tport *tport = tfcp_req->tport;
-       struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+       struct nvmefc_fcp_req *fcpreq;
+
+       spin_lock(&tfcp_req->reqlock);
+       fcpreq = tfcp_req->fcpreq;
+       spin_unlock(&tfcp_req->reqlock);
 
-       if (tport->remoteport) {
+       if (tport->remoteport && fcpreq) {
                fcpreq->status = tfcp_req->status;
                fcpreq->done(fcpreq);
        }
 
        inireq->fcpreq = fcpreq;
        inireq->tfcp_req = tfcp_req;
+       INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
        tfcp_req->fcpreq = fcpreq;
        tfcp_req->tport = rport->targetport->private;
+       spin_lock_init(&tfcp_req->reqlock);
        INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
 
        ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
 {
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
-       struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+       struct nvmefc_fcp_req *fcpreq;
        u32 rsplen = 0, xfrlen = 0;
-       int fcp_err = 0;
+       int fcp_err = 0, active, aborted;
        u8 op = tgt_fcpreq->op;
 
+       spin_lock(&tfcp_req->reqlock);
+       fcpreq = tfcp_req->fcpreq;
+       active = tfcp_req->active;
+       aborted = tfcp_req->aborted;
+       tfcp_req->active = true;
+       spin_unlock(&tfcp_req->reqlock);
+
+       if (unlikely(active))
+               /* illegal - call while i/o active */
+               return -EALREADY;
+
+       if (unlikely(aborted)) {
+               /* the target transport aborted this i/o earlier */
+               spin_lock(&tfcp_req->reqlock);
+               tfcp_req->active = false;
+               spin_unlock(&tfcp_req->reqlock);
+               tgt_fcpreq->transferred_length = 0;
+               tgt_fcpreq->fcp_error = -ECANCELED;
+               tgt_fcpreq->done(tgt_fcpreq);
+               return 0;
+       }
+
+       /*
+        * if fcpreq is NULL, the I/O has been aborted (from
+        * initiator side). For the target side, act as if all is well
+        * but don't actually move data.
+        */
+
        switch (op) {
        case NVMET_FCOP_WRITEDATA:
                xfrlen = tgt_fcpreq->transfer_length;
-               fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
-                                       tgt_fcpreq->offset, xfrlen);
-               fcpreq->transferred_length += xfrlen;
+               if (fcpreq) {
+                       fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
+                                       fcpreq->first_sgl, tgt_fcpreq->offset,
+                                       xfrlen);
+                       fcpreq->transferred_length += xfrlen;
+               }
                break;
 
        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
                xfrlen = tgt_fcpreq->transfer_length;
-               fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
-                                       tgt_fcpreq->offset, xfrlen);
-               fcpreq->transferred_length += xfrlen;
+               if (fcpreq) {
+                       fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
+                                       fcpreq->first_sgl, tgt_fcpreq->offset,
+                                       xfrlen);
+                       fcpreq->transferred_length += xfrlen;
+               }
                if (op == NVMET_FCOP_READDATA)
                        break;
 
                /* Fall-Thru to RSP handling */
 
        case NVMET_FCOP_RSP:
-               rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
-                               fcpreq->rsplen : tgt_fcpreq->rsplen);
-               memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
-               if (rsplen < tgt_fcpreq->rsplen)
-                       fcp_err = -E2BIG;
-               fcpreq->rcv_rsplen = rsplen;
-               fcpreq->status = 0;
+               if (fcpreq) {
+                       rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
+                                       fcpreq->rsplen : tgt_fcpreq->rsplen);
+                       memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
+                       if (rsplen < tgt_fcpreq->rsplen)
+                               fcp_err = -E2BIG;
+                       fcpreq->rcv_rsplen = rsplen;
+                       fcpreq->status = 0;
+               }
                tfcp_req->status = 0;
                break;
 
-       case NVMET_FCOP_ABORT:
-               tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
-               break;
-
        default:
                fcp_err = -EINVAL;
                break;
        }
 
+       spin_lock(&tfcp_req->reqlock);
+       tfcp_req->active = false;
+       spin_unlock(&tfcp_req->reqlock);
+
        tgt_fcpreq->transferred_length = xfrlen;
        tgt_fcpreq->fcp_error = fcp_err;
        tgt_fcpreq->done(tgt_fcpreq);
        return 0;
 }
 
+static void
+fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
+                       struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+       struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+       int active;
+
+       /*
+        * mark as aborted. This matters only if two threads are in the
+        * transport (one doing io, the other the abort), and it only
+        * kills ops posted after the abort request
+        */
+       spin_lock(&tfcp_req->reqlock);
+       active = tfcp_req->active;
+       tfcp_req->aborted = true;
+       spin_unlock(&tfcp_req->reqlock);
+
+       tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
+
+       /*
+        * nothing more to do. If io wasn't active, the transport should
+        * immediately call the req_release. If it was active, the op
+        * will complete, and the transport will then call req_release.
+        */
+}
+
 static void
 fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
                        void *hw_queue_handle,
                        struct nvmefc_fcp_req *fcpreq)
 {
+       struct fcloop_rport *rport = remoteport->private;
+       struct fcloop_ini_fcpreq *inireq = fcpreq->private;
+       struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
+
+       if (!tfcp_req)
+               /* abort has already been called */
+               return;
+
+       if (rport->targetport)
+               nvmet_fc_rcv_fcp_abort(rport->targetport,
+                                       &tfcp_req->tgt_fcp_req);
+
+       /* break initiator/target relationship for io */
+       spin_lock(&tfcp_req->reqlock);
+       inireq->tfcp_req = NULL;
+       tfcp_req->fcpreq = NULL;
+       spin_unlock(&tfcp_req->reqlock);
+
+       /* post the aborted io completion */
+       fcpreq->status = -ECANCELED;
+       schedule_work(&inireq->iniwork);
 }
 
 static void
        .targetport_delete      = fcloop_targetport_delete,
        .xmt_ls_rsp             = fcloop_xmt_ls_rsp,
        .fcp_op                 = fcloop_fcp_op,
+       .fcp_abort              = fcloop_tgt_fcp_abort,
        .fcp_req_release        = fcloop_fcp_req_release,
        .max_hw_queues          = FCLOOP_HW_QUEUES,
        .max_sgl_segments       = FCLOOP_SGL_SEGS,
 
        }
 #endif
 
-       if (rsp->op == NVMET_FCOP_ABORT) {
-               lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                               "6103 Abort op: oxri x%x %d cnt %d\n",
-                               ctxp->oxid, ctxp->state, ctxp->entry_cnt);
-
-               lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
-                                "xri x%x state x%x cnt x%x\n",
-                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
-
-               atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
-               ctxp->entry_cnt++;
-               ctxp->flag |= LPFC_NVMET_ABORT_OP;
-               if (ctxp->flag & LPFC_NVMET_IO_INP)
-                       lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
-                                                      ctxp->oxid);
-               else
-                       lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
-                                                        ctxp->oxid);
-               return 0;
-       }
-
        /* Sanity check */
        if (ctxp->state == LPFC_NVMET_STE_ABORT) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
        complete(&tport->tport_unreg_done);
 }
 
+static void
+lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
+                        struct nvmefc_tgt_fcp_req *req)
+{
+       struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
+       struct lpfc_nvmet_rcv_ctx *ctxp =
+               container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+       struct lpfc_hba *phba = ctxp->phba;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+                       "6103 Abort op: oxri x%x %d cnt %d\n",
+                       ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+
+       lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x state x%x cnt x%x\n",
+                        ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+
+       atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
+       ctxp->entry_cnt++;
+       ctxp->flag |= LPFC_NVMET_ABORT_OP;
+       if (ctxp->flag & LPFC_NVMET_IO_INP)
+               lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+                                              ctxp->oxid);
+       else
+               lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+                                                ctxp->oxid);
+}
+
 static void
 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *rsp)
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
+       .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
 
        .max_hw_queues  = 1,
 
                                         * rsp as well
                                         */
        NVMET_FCOP_RSP          = 4,    /* send rsp frame */
-       NVMET_FCOP_ABORT        = 5,    /* abort exchange via ABTS */
-       NVMET_FCOP_BA_ACC       = 6,    /* send BA_ACC */
-       NVMET_FCOP_BA_RJT       = 7,    /* send BA_RJT */
 };
 
 /**
  *     upon completion of the operation.  The nvmet-fc layer will also set a
  *     private pointer for its own use in the done routine.
  *
- * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !!
- *
  * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
  * entrypoint.
  * @op:       Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
  *           or upon success/failure of FCP_CONF if it is supported, the
  *           LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
  *           consider the operation complete.
- *         NVMET_FCOP_ABORT: the LLDD is to terminate the exchange
- *           corresponding to the fcp operation. The LLDD shall send
- *           ABTS and follow FC exchange abort-multi rules, including
- *           ABTS retries and possible logout.
  *       Upon completing the indicated operation, the LLDD is to set the
 *       status fields for the operation (transferred_length and fcp_error
  *       status) in the request, then call the "done" routine
  *       Returns 0 on success, -<errno> on failure (Ex: -EIO)
  *       Entrypoint is Mandatory.
  *
+ * @fcp_abort:  Called by the transport to abort an active command.
+ *       The command may be in-between operations (nothing active in LLDD)
+ *       or may have an active WRITEDATA operation pending. The LLDD is to
+ *       initiate the ABTS process for the command and return from the
+ *       callback. The ABTS need not be complete when the callback returns.
+ *       The fcp_abort callback cannot return failure. After the
+ *       fcp_abort() callback completes, the transport will wait for any
+ *       outstanding operation (if there was one) to complete, then will
+ *       call the fcp_req_release() callback to return the command's
+ *       exchange context back to the LLDD.
+ *
  * @fcp_req_release:  Called by the transport to return a nvmefc_tgt_fcp_req
  *       to the LLDD after all operations on the fcp operation are complete.
  *       This may be due to the command completing or upon completion of
                                struct nvmefc_tgt_ls_req *tls_req);
        int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
                                struct nvmefc_tgt_fcp_req *fcpreq);
+       void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
+                               struct nvmefc_tgt_fcp_req *fcpreq);
        void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
                                struct nvmefc_tgt_fcp_req *fcpreq);
 
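As a rough illustration of the @fcp_abort contract above, a sketch of a
minimal LLDD implementation and its template wiring; all my_* names are
hypothetical and not taken from any real driver:

static void
my_lldd_fcp_abort(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *fcpreq)
{
        struct my_exchange *xchg = my_exchange_from_fcpreq(fcpreq);

        /*
         * start the ABTS and return; it need not be complete here. Any
         * op still outstanding on the exchange should complete with
         * error, after which the transport calls fcp_req_release()
         */
        my_issue_abts(xchg);
}

static struct nvmet_fc_target_template my_tgt_template = {
        /* ... other mandatory entrypoints and limits ... */
        .fcp_abort      = my_lldd_fcp_abort,
};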
                        struct nvmefc_tgt_fcp_req *fcpreq,
                        void *cmdiubuf, u32 cmdiubuf_len);
 
+void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
+                       struct nvmefc_tgt_fcp_req *fcpreq);
+
 #endif /* _NVME_FC_DRIVER_H */