struct blk_mq_hw_ctx *hctx;
unsigned long i;
- lockdep_assert_held(&q->sysfs_dir_lock);
-
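+ /* take sysfs_dir_lock here instead of asserting that the caller holds it */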
+ mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
- return;
+ goto unlock;
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
+
+unlock:
+ mutex_unlock(&q->sysfs_dir_lock);
}
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
unsigned long i;
int ret = 0;
- lockdep_assert_held(&q->sysfs_dir_lock);
-
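+ /* likewise, acquire sysfs_dir_lock locally rather than relying on the caller */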
+ mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
- return ret;
+ goto unlock;
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
if (ret)
	break;
}
+unlock:
+ mutex_unlock(&q->sysfs_dir_lock);
+
return ret;
}
unsigned long i, j;
/* protect against switching io scheduler */
- lockdep_assert_held(&q->sysfs_lock);
-
+ mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int old_node;
int node = blk_mq_get_hctx_node(set, i);
xa_for_each_start(&q->hctx_table, j, hctx, j)
blk_mq_exit_hctx(q, set, hctx, j);
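+ /* release the sysfs_lock taken above before tearing down cpuhp callbacks */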
+ mutex_unlock(&q->sysfs_lock);
/* unregister cpuhp callbacks for exited hctxs */
blk_mq_remove_hw_queues_cpuhp(q);
xa_init(&q->hctx_table);
- mutex_lock(&q->sysfs_lock);
-
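+ /* blk_mq_realloc_hw_ctxs() now acquires sysfs_lock internally */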
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
goto err_hctxs;
- mutex_unlock(&q->sysfs_lock);
-
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
return 0;
err_hctxs:
- mutex_unlock(&q->sysfs_lock);
blk_mq_release(q);
err_exit:
q->mq_ops = NULL;
return false;
/* q->elevator needs protection from ->sysfs_lock */
- lockdep_assert_held(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_lock);
/* the check has to be done with holding sysfs_lock */
if (!q->elevator) {
kfree(qe);
- goto out;
+ goto unlock;
}
INIT_LIST_HEAD(&qe->node);
__elevator_get(qe->type);
list_add(&qe->node, head);
elevator_disable(q);
-out:
+unlock:
+ mutex_unlock(&q->sysfs_lock);
+
return true;
}
list_del(&qe->node);
kfree(qe);
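+ /* elevator_switch() needs sysfs_lock, which the caller no longer holds */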
+ mutex_lock(&q->sysfs_lock);
elevator_switch(q, t);
/* drop the reference acquired in blk_mq_elv_switch_none */
elevator_put(t);
+ mutex_unlock(&q->sysfs_lock);
}
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
return;
- list_for_each_entry(q, &set->tag_list, tag_set_list) {
- mutex_lock(&q->sysfs_dir_lock);
- mutex_lock(&q->sysfs_lock);
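+ /* only freeze here; sysfs locks are now taken inside the helpers that need them */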
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_freeze_queue(q);
- }
/*
* Switch IO scheduler to 'none', cleaning up the data associated
* with the previous scheduler. We will switch back once we are done
* updating the new sw to hw queue mappings.
*/
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_elv_switch_back(&head, q);
- list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
- mutex_unlock(&q->sysfs_lock);
- mutex_unlock(&q->sysfs_dir_lock);
- }
/* Free the excess tags when nr_hw_queues shrink. */
for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)