static void blk_mq_sysfs_release(struct kobject *kobj)
 {
+       struct request_queue *q;
+
+       q = container_of(kobj, struct request_queue, mq_kobj);
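+       /*
+        * Each ctx kobject pins mq_kobj, and the ctx structs live inside
+        * the percpu queue_ctx area, so it is only safe to free that area
+        * here, once the last of those references has been dropped.
+        */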
+       free_percpu(q->queue_ctx);
+}
+
+static void blk_mq_ctx_release(struct kobject *kobj)
+{
+       struct blk_mq_ctx *ctx;
+
+       ctx = container_of(kobj, struct blk_mq_ctx, kobj);
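+       /* drop the mq_kobj reference taken when this ctx was registered */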
+       kobject_put(&ctx->queue->mq_kobj);
+}
+
+static void blk_mq_hctx_release(struct kobject *kobj)
+{
+       struct blk_mq_hw_ctx *hctx;
+
+       hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
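+       /* safe only now: the last reference to this hctx is gone */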
+       kfree(hctx);
 }
 
 struct blk_mq_ctx_sysfs_entry {
 static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
-       .release        = blk_mq_sysfs_release,
+       .release        = blk_mq_ctx_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
-       .release        = blk_mq_sysfs_release,
+       .release        = blk_mq_hctx_release,
 };
 
 static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
                return ret;
 
        hctx_for_each_ctx(hctx, ctx, i) {
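+               /* pin mq_kobj for this ctx; dropped by blk_mq_ctx_release() */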
+               kobject_get(&q->mq_kobj);
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
 
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
+       queue_for_each_hw_ctx(q, hctx, i)
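+               /* the hctx itself is now freed by blk_mq_hctx_release() */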
                free_cpumask_var(hctx->cpumask);
-               kfree(hctx);
-       }
 }
 
 static int blk_mq_init_hctx(struct request_queue *q,
 
        percpu_ref_exit(&q->mq_usage_counter);
 
-       free_percpu(q->queue_ctx);
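+       /* queue_ctx is now freed by blk_mq_sysfs_release(), not here */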
        kfree(q->queue_hw_ctx);
        kfree(q->mq_map);
 
-       q->queue_ctx = NULL;
        q->queue_hw_ctx = NULL;
        q->mq_map = NULL;