__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
init_waitqueue_head(&q->mq_freeze_wq);
+ mutex_init(&q->mq_freeze_lock);
/*
 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 */
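/*
 * For context: upstream pairs the comment above with the init of
 * q_usage_counter in the queue-allocation path. A sketch of that call
 * (assumed here, not part of this hunk; the failure label varies by
 * kernel version):
 */
	if (percpu_ref_init(&q->q_usage_counter,
			    blk_queue_usage_counter_release,
			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail;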
void blk_mq_freeze_queue_start(struct request_queue *q)
{
int freeze_depth;
+ mutex_lock(&q->mq_freeze_lock);
freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
if (freeze_depth == 1) {
percpu_ref_kill(&q->q_usage_counter);
blk_mq_run_hw_queues(q, false);
}
+ mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
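/*
 * For context, the helpers that usually pair with the start above; a
 * sketch of the upstream versions (not part of this hunk):
 */
void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	/* Sleep until the last reference drops and the release callback fires. */
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}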
void blk_mq_unfreeze_queue(struct request_queue *q)
{
int freeze_depth;
+ mutex_lock(&q->mq_freeze_lock);
freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
percpu_ref_reinit(&q->q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
+ mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
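/*
 * The mutex matters because a freeze and an unfreeze may run
 * concurrently: without it, percpu_ref_kill() and percpu_ref_reinit()
 * can interleave and leave q_usage_counter in the wrong mode, hanging
 * the freeze wait. Hypothetical caller showing the usual bracket
 * pattern (illustration only):
 */
static void example_quiesce_and_update(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* drain: no new requests can enter */
	/* ... safely update queue state here ... */
	blk_mq_unfreeze_queue(q);	/* resume request processing */
}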
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
+#ifndef __GENKSYMS__
+ /*
+ * Protect concurrent access to q_usage_counter by
+ * percpu_ref_switch_to_percpu(), percpu_ref_kill(), and
+ * percpu_ref_reinit().
+ */
+ struct mutex mq_freeze_lock;
+#endif
};
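/*
 * Why the #ifndef __GENKSYMS__ guard above: genksyms defines
 * __GENKSYMS__ while computing symbol CRCs, so a member hidden behind
 * the guard does not alter the recorded kABI checksum of the struct.
 * A minimal sketch of the pattern, with illustrative names:
 */
struct kabi_example {
	unsigned long existing_field;	/* visible to genksyms */
#ifndef __GENKSYMS__
	struct mutex added_lock;	/* hidden from CRC computation */
#endif
};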
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */