*
  * Parse per-blkg config update from @input and initialize @ctx with the
  * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
- * part of @input following MAJ:MIN.  This function returns with RCU read
- * lock and queue lock held and must be paired with blkg_conf_finish().
+ * part of @input following MAJ:MIN.  This function returns with queue lock
+ * held and must be paired with blkg_conf_finish().
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx)
-       __acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
+       __acquires(&bdev->bd_queue->queue_lock)
 {
        struct block_device *bdev;
        struct gendisk *disk;
        if (ret)
                goto fail;
 
-       rcu_read_lock();
        spin_lock_irq(&q->queue_lock);
 
        if (!blkcg_policy_enabled(q, pol)) {
 
                /* Drop locks to do new blkg allocation with GFP_KERNEL. */
                spin_unlock_irq(&q->queue_lock);
-               rcu_read_unlock();
 
                new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto fail_exit_queue;
                }
 
-               rcu_read_lock();
                spin_lock_irq(&q->queue_lock);
 
                if (!blkcg_policy_enabled(q, pol)) {
        radix_tree_preload_end();
 fail_unlock:
        spin_unlock_irq(&q->queue_lock);
-       rcu_read_unlock();
 fail_exit_queue:
        blk_queue_exit(q);
 fail:
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-       __releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
+       __releases(&ctx->bdev->bd_queue->queue_lock)
 {
        spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
-       rcu_read_unlock();
        blkdev_put_no_open(ctx->bdev);
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
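
For context, a minimal sketch of how a policy's cftype write handler pairs
these two calls after this change.  blkcg_policy_foo, blkg_to_foo() and the
->limit field are hypothetical stand-ins (modeled on the blk-throttle
pattern); blkg_conf_prep(), blkg_conf_finish() and the ctx.body/ctx.blkg
fields are the API shown in the patch.  Note that only the queue lock, not
RCU, is held between the two calls now:

static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	u64 v;
	int ret;

	/* Parses "MAJ:MIN <body>"; returns with the queue lock held. */
	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out;

	/* ctx.blkg is pinned by the queue lock; no RCU needed here. */
	blkg_to_foo(ctx.blkg)->limit = v;
	ret = 0;
out:
	/* Drops the queue lock and puts the bdev reference. */
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}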
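
The middle hunk also shows the classic drop-the-spinlock-to-allocate
pattern: GFP_KERNEL may sleep, so the queue lock is released around
blkg_alloc() and blkcg_policy_enabled() is rechecked once the lock is
retaken, since the queue's state may have changed in between.  A generic
sketch of that pattern, assuming a hypothetical foo_cache in place of the
request queue:

struct foo { long data; };

struct foo_cache {
	spinlock_t lock;
	struct foo *entry;
	bool enabled;
};

static int foo_get_or_create(struct foo_cache *c)
{
	struct foo *new = NULL;
	int ret = 0;

retry:
	spin_lock_irq(&c->lock);
	if (!c->enabled) {		/* revalidate on every pass */
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}
	if (!c->entry) {
		if (!new) {
			/* Drop the lock; GFP_KERNEL may sleep. */
			spin_unlock_irq(&c->lock);
			new = kzalloc(sizeof(*new), GFP_KERNEL);
			if (!new)
				return -ENOMEM;
			goto retry;
		}
		c->entry = new;
		new = NULL;
	}
out_unlock:
	spin_unlock_irq(&c->lock);
	kfree(new);			/* freed if we raced or bailed out */
	return ret;
}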