__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
+/*
+ * Mark a hardware queue as needing a restart. For queues that share a
+ * tag set, also maintain a count of how many hardware queues are marked
+ * for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               return;
+
+       if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+               struct request_queue *q = hctx->queue;
+
+               /*
+                * test_and_set_bit() ensures that only the thread which
+                * actually sets the bit bumps the counter.
+                */
+               if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                       atomic_inc(&q->shared_hctx_restart);
+       } else {
+               set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+       }
+}
+
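+/*
+ * Clear a hardware queue's restart mark, keeping the shared restart count
+ * in sync, and re-run the queue if requests are still pending. Returns
+ * true if the queue was re-run.
+ */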
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               return false;
+
+       if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+               struct request_queue *q = hctx->queue;
+
+               /*
+                * Pairs with test_and_set_bit() in
+                * blk_mq_sched_mark_restart_hctx() to keep the count of
+                * restart-marked queues balanced.
+                */
+               if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                       atomic_dec(&q->shared_hctx_restart);
+       } else {
+               clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+       }
+
+       if (blk_mq_hctx_has_pending(hctx)) {
+               blk_mq_run_hw_queue(hctx, true);
+               return true;
+       }
+
+       return false;
+}
+
 struct request *blk_mq_sched_get_request(struct request_queue *q,
                                         struct bio *bio,
                                         unsigned int op,
        return true;
 }
 
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-               clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-               if (blk_mq_hctx_has_pending(hctx)) {
-                       blk_mq_run_hw_queue(hctx, true);
-                       return true;
-               }
-       }
-       return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos:    loop cursor.
        unsigned int i, j;
 
        if (set->flags & BLK_MQ_F_TAG_SHARED) {
+               /*
+                * If this is 0, no hardware queue has RESTART marked
+                * and there is nothing to do.
+                */
+               if (!atomic_read(&queue->shared_hctx_restart))
+                       return;
+
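+               /*
+                * Walk the queues sharing this tag set in a round-robin
+                * fashion, starting after this queue, so restarts are
+                * spread evenly across the sharers.
+                */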
                rcu_read_lock();
                list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
                                           tag_set_list) {
 
        return false;
 }
 
-/*
- * Mark a hardware queue as needing a restart.
- */
-static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-               set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 
        }
 }
 
+/*
+ * Caller needs to ensure that the queue is either frozen/quiesced or not
+ * yet live: hctx->flags and the restart count are updated here without
+ * locking.
+ */
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               if (shared)
+               if (shared) {
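+                       /*
+                        * The restart count only covers hctxs with shared
+                        * tags, so hctxs already marked for restart must
+                        * enter and leave the count as the flag flips.
+                        */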
+                       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                               atomic_inc(&q->shared_hctx_restart);
                        hctx->flags |= BLK_MQ_F_TAG_SHARED;
-               else
+               } else {
+                       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                               atomic_dec(&q->shared_hctx_restart);
                        hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+               }
        }
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
+                                       bool shared)
 {
        struct request_queue *q;