@@ ... @@ static int blk_mq_queue_enter(struct request_queue *q)
 {
-       int ret;
-
-       __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
-       smp_mb();
-
-       /* we have problems freezing the queue if it's initializing */
-       if (!q->mq_freeze_depth)
-               return 0;
-
-       __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+       while (true) {
+               int ret;
 
-       spin_lock_irq(q->queue_lock);
-       ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-               !q->mq_freeze_depth || blk_queue_dying(q),
-               *q->queue_lock);
-       /* inc usage with lock hold to avoid freeze_queue runs here */
-       if (!ret && !blk_queue_dying(q))
-               __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
-       else if (blk_queue_dying(q))
-               ret = -ENODEV;
-       spin_unlock_irq(q->queue_lock);
+               if (percpu_ref_tryget_live(&q->mq_usage_counter))
+                       return 0;
 
-       return ret;
+               ret = wait_event_interruptible(q->mq_freeze_wq,
+                               !q->mq_freeze_depth || blk_queue_dying(q));
+               if (blk_queue_dying(q))
+                       return -ENODEV;
+               if (ret)
+                       return ret;
+       }
 }
 
 static void blk_mq_queue_exit(struct request_queue *q)
 {
-       __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+       percpu_ref_put(&q->mq_usage_counter);
+}
+
+static void blk_mq_usage_counter_release(struct percpu_ref *ref)
+{
+       struct request_queue *q =
+               container_of(ref, struct request_queue, mq_usage_counter);
+
+       wake_up_all(&q->mq_freeze_wq);
 }
 
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
  */
@@ ... @@ static void blk_mq_freeze_queue(struct request_queue *q)
        spin_lock_irq(q->queue_lock);
        q->mq_freeze_depth++;
        spin_unlock_irq(q->queue_lock);
 
-       while (true) {
-               s64 count;
-
-               spin_lock_irq(q->queue_lock);
-               count = percpu_counter_sum(&q->mq_usage_counter);
-               spin_unlock_irq(q->queue_lock);
-
-               if (count == 0)
-                       break;
-               blk_mq_start_hw_queues(q);
-               msleep(10);
-       }
+       percpu_ref_kill(&q->mq_usage_counter);
+       blk_mq_run_queues(q, false);
+       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
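
Here blk_mq_run_queues() kicks the hardware queues so requests that already hold a reference get dispatched and eventually drop it; wait_event() then sleeps until the release callback reports zero. The draining half of the gadget sketch, under the same assumptions as above:

        static void gadget_freeze(struct gadget *g)
        {
                /* stop new references; existing holders keep theirs */
                percpu_ref_kill(&g->ref);
                /* gadget_ref_release() wakes us when the count hits zero */
                wait_event(g->drain_wq, percpu_ref_is_zero(&g->ref));
        }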
 
@@ ... @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
        wake = !--q->mq_freeze_depth;
        WARN_ON_ONCE(q->mq_freeze_depth < 0);
        spin_unlock_irq(q->queue_lock);
-       if (wake)
+       if (wake) {
+               percpu_ref_reinit(&q->mq_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
+       }
 }
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
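
percpu_ref_reinit() is only legal on a ref that has been killed and drained to zero, which the freeze side guarantees; it returns the ref to percpu mode holding one reference, just as after init. The matching gadget sketch:

        static void gadget_unfreeze(struct gadget *g)
        {
                /* ref was killed and drained; back to percpu mode */
                percpu_ref_reinit(&g->ref);
                /* wake any waiters, mirroring blk_mq_unfreeze_queue() */
                wake_up_all(&g->drain_wq);
        }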
@@ ... @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        if (!q)
                goto err_hctxs;
 
-       if (percpu_counter_init(&q->mq_usage_counter, 0))
+       if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release))
                goto err_map;
 
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
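
percpu_ref_init() allocates the per-CPU counters and can fail, hence the goto err_map path. The two-argument form matches the API at the time of this patch; later kernels take additional flags and GFP arguments. Setup for the gadget sketch:

        static int gadget_setup(struct gadget *g)
        {
                init_waitqueue_head(&g->drain_wq);
                /* starts in percpu mode, holding one initial reference */
                return percpu_ref_init(&g->ref, gadget_ref_release);
        }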
@@ ... @@ void blk_mq_free_queue(struct request_queue *q)
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
 
-       percpu_counter_destroy(&q->mq_usage_counter);
+       percpu_ref_exit(&q->mq_usage_counter);
 
        free_percpu(q->queue_ctx);
        kfree(q->queue_hw_ctx);
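
percpu_ref_exit() frees the per-CPU counters and must be the last operation on the ref; the teardown mirror of the sketch:

        static void gadget_teardown(struct gadget *g)
        {
                /* no references may be taken or dropped after this */
                percpu_ref_exit(&g->ref);
        }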
@@ ... @@ static int __init blk_mq_init(void)
 {
        blk_mq_cpu_init();
 
-       /* Must be called after percpu_counter_hotcpu_callback() */
-       hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
+       hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
 
        return 0;
 }
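
The priority change falls out of the conversion: the old -10 existed only to run this notifier after percpu_counter_hotcpu_callback(), and with the percpu_counter gone there is nothing left to order against. For completeness, a full freeze cycle in terms of the hypothetical gadget helpers:

        gadget_freeze(g);       /* gadget_get() now fails */
        /* ... rework data structures with no users in flight ... */
        gadget_unfreeze(g);     /* gadget_get() succeeds again */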