*/
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
-       if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
-           !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-               atomic_inc(&hctx->tags->active_queues);
+       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+               struct request_queue *q = hctx->queue;
+               struct blk_mq_tag_set *set = q->tag_set;
+
+               if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
+                   !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+                       atomic_inc(&set->active_queues_shared_sbitmap);
+       } else {
+               if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+                   !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                       atomic_inc(&hctx->tags->active_queues);
+       }
 
        return true;
 }
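
For orientation, __blk_mq_tag_busy() is only reached through the inline
wrapper in block/blk-mq-tag.h, which already filters out hctxs whose tags are
not shared at all. A rough sketch of that wrapper (not part of this diff, and
assumed unchanged by the series):

	static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
	{
		/* Unshared tag sets never account active queues. */
		if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
			return false;

		return __blk_mq_tag_busy(hctx);
	}

So the new blk_mq_is_sbitmap_shared() branch only decides how a shared user
is counted: per tag set via QUEUE_FLAG_HCTX_ACTIVE and
active_queues_shared_sbitmap, or per hctx via BLK_MQ_S_TAG_ACTIVE and
tags->active_queues as before.
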
 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
        struct blk_mq_tags *tags = hctx->tags;
-
-       if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-               return;
-
-       atomic_dec(&tags->active_queues);
+       struct request_queue *q = hctx->queue;
+       struct blk_mq_tag_set *set = q->tag_set;
+
+       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+               if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
+                                       &q->queue_flags))
+                       return;
+               atomic_dec(&set->active_queues_shared_sbitmap);
+       } else {
+               if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                       return;
+               atomic_dec(&tags->active_queues);
+       }
 
        blk_mq_tag_wakeup_all(tags, false);
 }
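
The idle side mirrors this through its own inline wrapper; again a sketch for
context, assumed unchanged by this patch:

	static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
	{
		if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
			return;

		__blk_mq_tag_idle(hctx);
	}

Note that blk_mq_tag_wakeup_all() is still passed hctx->tags in both
branches; in the shared-sbitmap case this presumably works because the
per-hctx tags point at the shared bitmap. The next hunk applies the same
shared/unshared split when rationing tags in hctx_may_queue().
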
 
 
 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                   struct sbitmap_queue *bt)
 {
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;
-       if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-               return true;
 
        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;
 
-       users = atomic_read(&hctx->tags->active_queues);
+       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+               struct request_queue *q = hctx->queue;
+               struct blk_mq_tag_set *set = q->tag_set;
+
+               if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+                       return true;
+               users = atomic_read(&set->active_queues_shared_sbitmap);
+       } else {
+               if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                       return true;
+               users = atomic_read(&hctx->tags->active_queues);
+       }
+
        if (!users)
                return true;
 
 
 #define QUEUE_FLAG_PCI_P2PDMA  25      /* device supports PCI p2p requests */
 #define QUEUE_FLAG_ZONE_RESETALL 26    /* supports Zone Reset All */
 #define QUEUE_FLAG_RQ_ALLOC_TIME 27    /* record rq->alloc_time_ns */
+#define QUEUE_FLAG_HCTX_ACTIVE 28      /* at least one blk-mq hctx is active */
 
 #define QUEUE_FLAG_MQ_DEFAULT  ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_SAME_COMP))
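
The atomic counter bumped in the tag code is not defined in this excerpt;
presumably the same series adds it to struct blk_mq_tag_set in
include/linux/blk-mq.h, roughly along these lines (placement and neighbouring
fields are assumptions, only the field name is taken from the diff):

	struct blk_mq_tag_set {
		/* ... existing fields ... */
		atomic_t	active_queues_shared_sbitmap;
		/* ... */
	};
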