}
 EXPORT_SYMBOL(scsi_host_get);
 
+/*
+ * Accumulator passed through blk_mq_tagset_busy_iter() to
+ * scsi_host_check_in_flight() when counting in-flight requests.
+ */
+struct scsi_host_mq_in_flight {
+       int cnt;
+};
+
+/*
+ * blk_mq_tagset_busy_iter() callback: count each request that has
+ * actually been started, i.e. is currently in flight on the host.
+ */
+static void scsi_host_check_in_flight(struct request *rq, void *data,
+               bool reserved)
+{
+       struct scsi_host_mq_in_flight *in_flight = data;
+
+       if (blk_mq_request_started(rq))
+               in_flight->cnt++;
+}
+
 /**
  * scsi_host_busy - Return the host busy counter
  * @shost:     Pointer to Scsi_Host to inc.
  **/
 int scsi_host_busy(struct Scsi_Host *shost)
 {
-       return atomic_read(&shost->host_busy);
+       struct scsi_host_mq_in_flight in_flight = {
+               .cnt = 0,
+       };
+
+       if (!shost->use_blk_mq)
+               return atomic_read(&shost->host_busy);
+
+       blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
+                       &in_flight);
+       return in_flight.cnt;
 }
 EXPORT_SYMBOL(scsi_host_busy);
 
 
        unsigned long flags;
 
        rcu_read_lock();
-       atomic_dec(&shost->host_busy);
+       if (!shost->use_blk_mq)
+               atomic_dec(&shost->host_busy);
        if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
 
 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 {
-       if (shost->can_queue > 0 &&
+       /*
+        * blk-mq can handle host queue busy efficiently via host-wide driver
+        * tag allocation
+        */
+
+       if (!shost->use_blk_mq && shost->can_queue > 0 &&
            atomic_read(&shost->host_busy) >= shost->can_queue)
                return true;
        if (atomic_read(&shost->host_blocked) > 0)
        if (scsi_host_in_recovery(shost))
                return 0;
 
-       busy = atomic_inc_return(&shost->host_busy) - 1;
+       if (!shost->use_blk_mq)
+               busy = atomic_inc_return(&shost->host_busy) - 1;
+       else
+               busy = 0;
        if (atomic_read(&shost->host_blocked) > 0) {
-               if (busy)
+               if (busy || scsi_host_busy(shost))
                        goto starved;
 
                /*
                                     "unblocking host at zero depth\n"));
        }
 
-       if (shost->can_queue > 0 && busy >= shost->can_queue)
+       if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
                goto starved;
        if (shost->host_self_blocked)
                goto starved;
         * with the locks as normal issue path does.
         */
        atomic_inc(&sdev->device_busy);
-       atomic_inc(&shost->host_busy);
+
+       if (!shost->use_blk_mq)
+               atomic_inc(&shost->host_busy);
        if (starget->can_queue > 0)
                atomic_inc(&starget->target_busy);