bool has_sched,
                                       struct request *rq)
 {
-       /* dispatch flush rq directly */
-       if (rq->rq_flags & RQF_FLUSH_SEQ) {
-               spin_lock(&hctx->lock);
-               list_add(&rq->queuelist, &hctx->dispatch);
-               spin_unlock(&hctx->lock);
+       /*
+        * Dispatch flush and passthrough requests directly.
+        *
+        * A passthrough request must go on hctx->dispatch: the device may
+        * be in a state where it cannot handle FS requests, so every FS
+        * request fails with BLK_STS_RESOURCE and ends up back on
+        * hctx->dispatch, while a passthrough request may be exactly what
+        * is needed to get the device working again.  If that passthrough
+        * request sat in the scheduler queue instead, it would never be
+        * dispatched, because requests on hctx->dispatch are always
+        * served first.
+        */
+       if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
                return true;
-       }
 
        if (has_sched)
                rq->rq_flags |= RQF_SORTED;
 
        WARN_ON(e && (rq->tag != -1));
 
-       if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
+       if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+               blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
+       }
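For context on the "requests in hctx->dispatch are always served first"
claim above: the dispatch side drains hctx->dispatch before it ever
consults the elevator. A simplified paraphrase of
blk_mq_sched_dispatch_requests() (restart handling, budget and the
no-elevator path omitted; this is a sketch, not the verbatim kernel code):

	static void sched_dispatch_sketch(struct blk_mq_hw_ctx *hctx)
	{
		struct request_queue *q = hctx->queue;
		LIST_HEAD(rq_list);

		/* anything parked on hctx->dispatch is picked up first */
		spin_lock(&hctx->lock);
		list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);

		/*
		 * Only if the dispatch list was fully issued does the
		 * elevator get asked for more work, which is why a
		 * passthrough rq buried in the elevator can starve.
		 */
		if (blk_mq_dispatch_rq_list(q, &rq_list, false))
			blk_mq_do_dispatch_sched(hctx);
	}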
 
        if (e && e->type->ops.insert_requests) {
                LIST_HEAD(list);
 
                /*
                 * If RQF_DONTPREP is set, the request already carries
                 * driver specific data, so insert it on the hctx dispatch
                 * list to avoid any merge.
                 */
                if (rq->rq_flags & RQF_DONTPREP)
-                       blk_mq_request_bypass_insert(rq, false);
+                       blk_mq_request_bypass_insert(rq, false, false);
                else
                        blk_mq_sched_insert_request(rq, true, false, false);
        }
                        q->mq_ops->commit_rqs(hctx);
 
                spin_lock(&hctx->lock);
-               list_splice_init(list, &hctx->dispatch);
+               list_splice_tail_init(list, &hctx->dispatch);
                spin_unlock(&hctx->lock);
 
                /*
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+                                 bool run_queue)
 {
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
        spin_lock(&hctx->lock);
-       list_add_tail(&rq->queuelist, &hctx->dispatch);
+       if (at_head)
+               list_add(&rq->queuelist, &hctx->dispatch);
+       else
+               list_add_tail(&rq->queuelist, &hctx->dispatch);
        spin_unlock(&hctx->lock);
 
        if (run_queue)
        if (bypass_insert)
                return BLK_STS_RESOURCE;
 
-       blk_mq_request_bypass_insert(rq, run_queue);
+       blk_mq_request_bypass_insert(rq, false, run_queue);
        return BLK_STS_OK;
 }
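The new at_head flag simply selects which end of hctx->dispatch the
request lands on. A hedged usage sketch (hypothetical call site, not part
of this patch): a recovery passthrough request can now jump ahead of
everything already parked there:

	/* queue the recovery rq in front of stalled FS requests and run */
	blk_mq_request_bypass_insert(rq, true, true);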
 
 
        ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
        if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-               blk_mq_request_bypass_insert(rq, true);
+               blk_mq_request_bypass_insert(rq, false, true);
        else if (ret != BLK_STS_OK)
                blk_mq_end_request(rq, ret);
 
                if (ret != BLK_STS_OK) {
                        if (ret == BLK_STS_RESOURCE ||
                                        ret == BLK_STS_DEV_RESOURCE) {
-                               blk_mq_request_bypass_insert(rq,
+                               blk_mq_request_bypass_insert(rq, false,
                                                        list_empty(list));
                                break;
                        }
 
  */
 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                                bool at_head);
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+                                 bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);
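Taken together, a hypothetical core-side requeue helper illustrates the
intended split after this patch (requeue_one() and its policy are made up
for illustration; only the two insert functions come from this patch):

	static void requeue_one(struct request *rq)
	{
		/*
		 * Passthrough requests skip the elevator entirely so they
		 * can still make progress when FS requests are stalled.
		 */
		if (blk_rq_is_passthrough(rq))
			blk_mq_request_bypass_insert(rq, false, true);
		else
			blk_mq_sched_insert_request(rq, false, true, false);
	}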