struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
  
-       rq->cmd_flags &= ~REQ_STARTED;
 +      /*
 +       * Update q->in_flight[] here so that this tag becomes usable
 +       * early, because blk_queue_start_tag() uses
 +       * q->in_flight[BLK_RW_ASYNC] to limit async I/O and to reserve
 +       * tags for sync I/O.
 +       *
 +       * More importantly, this avoids the following I/O deadlock:
 +       *
 +       * - suppose 40 FUA requests arrive at the flush queue and the
 +       *   queue depth is 31
 +       * - 30 rqs are scheduled, then blk_queue_start_tag() can't
 +       *   allocate a tag for async I/O any more
 +       * - all 30 rqs complete before FLUSH_PENDING_TIMEOUT and
 +       *   flush_data_end_io() is called
 +       * - without updating q->in_flight[BLK_RW_ASYNC] here, the
 +       *   remaining rqs still can't go ahead; meanwhile they are held
 +       *   in the flush data queue, so the post-flush rq makes no
 +       *   progress
 +       * - only after the post-flush rq is handled can all these rqs
 +       *   be completed
 +       */
 +
 +      elv_completed_request(q, rq);
 +
 +      /* avoid double accounting */
++      rq->rq_flags &= ~RQF_STARTED;
 +
        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
 
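The comment in the hunk above walks through the deadlock being avoided. As a rough illustration only, here is a minimal userspace C sketch of the same accounting problem; can_start_async(), QUEUE_DEPTH, and the other names are invented for this example and merely mimic the depth/reservation check that blk_queue_start_tag() performs against q->in_flight[BLK_RW_ASYNC].

/*
 * Illustrative userspace simulation, not kernel code: if completed
 * flush-data requests are not subtracted from the async in-flight
 * count before the post-flush request is handled, the depth check
 * keeps rejecting new requests and nothing makes progress.
 */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH     31
#define NR_FUA_REQUESTS 40

static int in_flight_async;     /* stands in for q->in_flight[BLK_RW_ASYNC] */

/* stand-in for the depth/reservation check in blk_queue_start_tag() */
static bool can_start_async(void)
{
        /* one tag is kept back for sync I/O, so async stops at depth - 1 */
        return in_flight_async < QUEUE_DEPTH - 1;
}

int main(void)
{
        int issued = 0;

        /* issue as many flush-data requests as the depth check allows */
        while (issued < NR_FUA_REQUESTS && can_start_async()) {
                issued++;
                in_flight_async++;
        }
        printf("issued %d of %d requests\n", issued, NR_FUA_REQUESTS);

        /* all issued requests complete, but in_flight is NOT decremented */
        if (!can_start_async())
                printf("stuck: %d completed, yet no new request may start\n",
                       issued);

        /* the fix: account the completions before the post-flush rq */
        in_flight_async -= issued;
        if (can_start_async())
                printf("after accounting, the remaining requests can proceed\n");

        return 0;
}

In the hunk itself, this accounting happens via the elv_completed_request() call added before RQF_STARTED is cleared.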
        return 1;
  
   cmd_abort:
 -      if (mmc_packed_cmd(mq_rq->cmd_type)) {
 -              mmc_blk_abort_packed_req(mq_rq);
 -      } else {
 -              if (mmc_card_removed(card))
 -                      req->rq_flags |= RQF_QUIET;
 -              while (ret)
 -                      ret = blk_end_request(req, -EIO,
 -                                      blk_rq_cur_bytes(req));
 -      }
 +      if (mmc_card_removed(card))
-               req->cmd_flags |= REQ_QUIET;
++              req->rq_flags |= RQF_QUIET;
 +      while (ret)
 +              ret = blk_end_request(req, -EIO,
 +                              blk_rq_cur_bytes(req));
  
   start_new_req:
        if (rqc) {
                if (mmc_card_removed(card)) {
-                       rqc->cmd_flags |= REQ_QUIET;
+                       rqc->rq_flags |= RQF_QUIET;
                        blk_end_request_all(rqc, -EIO);
                } else {
 -                      /*
 -                       * If current request is packed, it needs to put back.
 -                       */
 -                      if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
 -                              mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
 -
                        mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
                        mmc_start_req(card->host,
                                      &mq->mqrq_cur->mmc_active, NULL);
 
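In the cmd_abort path above, blk_end_request() is called in a loop because, in the legacy block layer, it completes only the given number of bytes and returns true while part of the request remains. Below is a hedged userspace sketch of that idiom; fake_end_request() and the other names are invented stand-ins, not kernel APIs.

/*
 * Userspace illustration, not kernel code: drain a request chunk by
 * chunk with an error, mirroring
 *      while (ret)
 *              ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
        unsigned int remaining;         /* bytes not yet completed */
        unsigned int cur_bytes;         /* bytes in the current chunk */
};

/* stand-in for blk_end_request(): true while work remains on the request */
static bool fake_end_request(struct fake_request *req, int error,
                             unsigned int nr_bytes)
{
        req->remaining -= nr_bytes;
        printf("completed %u bytes with error %d, %u left\n",
               nr_bytes, error, req->remaining);
        return req->remaining != 0;
}

int main(void)
{
        struct fake_request req = { .remaining = 12288, .cur_bytes = 4096 };
        bool ret = true;

        while (ret)
                ret = fake_end_request(&req, -5 /* -EIO */, req.cur_bytes);

        return 0;
}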
        NVME_SC_REFTAG_CHECK            = 0x284,
        NVME_SC_COMPARE_FAILED          = 0x285,
        NVME_SC_ACCESS_DENIED           = 0x286,
 +      NVME_SC_UNWRITTEN_BLOCK         = 0x287,
  
        NVME_SC_DNR                     = 0x4000,
+ 
+ 
+       /*
+        * FC Transport-specific error status values for NVME commands
+        *
+        * Transport-specific status code values must be in the range 0xB0..0xBF
+        */
+ 
+       /* Generic FC failure - catchall */
+       NVME_SC_FC_TRANSPORT_ERROR      = 0x00B0,
+ 
+       /* I/O failure due to the FC exchange being ABTS'd */
+       NVME_SC_FC_TRANSPORT_ABORTED    = 0x00B1,
  };
  
  struct nvme_completion {
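The comment in the enum above reserves 0xB0..0xBF of the status code space for FC transport-specific values. As a small hedged sketch of how such a range check could look, nvme_status_is_fc_transport() below is a hypothetical helper written for this example, not something taken from the NVMe headers.

/*
 * Hypothetical helper, not from the NVMe headers: classify a status
 * code as FC transport-specific if it falls in the reserved
 * 0xB0..0xBF range mentioned in the comment above.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
        NVME_SC_UNWRITTEN_BLOCK         = 0x287,
        NVME_SC_FC_TRANSPORT_ERROR      = 0x00B0,
        NVME_SC_FC_TRANSPORT_ABORTED    = 0x00B1,
};

static bool nvme_status_is_fc_transport(unsigned int sc)
{
        return sc >= 0x00B0 && sc <= 0x00BF;
}

int main(void)
{
        printf("aborted:   %d\n",
               nvme_status_is_fc_transport(NVME_SC_FC_TRANSPORT_ABORTED));
        printf("unwritten: %d\n",
               nvme_status_is_fc_transport(NVME_SC_UNWRITTEN_BLOCK));
        return 0;
}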