iv = bip->bip_vec + bip->bip_vcnt;
 
        if (bip->bip_vcnt &&
-           bvec_gap_to_prev(bio->bi_bdev->bd_disk->queue,
+           bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
                             &bip->bip_vec[bip->bip_vcnt - 1], offset))
                return 0;
 
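Every hunk in this series swaps the open-coded bio->bi_bdev->bd_disk->queue /
bdev->bd_disk->queue chain for the bdev_get_queue() helper. For reference, a
minimal sketch of that helper, assuming the queue pointer is cached in
struct block_device as the bd_queue sparse annotations in the blkg_conf
hunks below suggest (the real definition lives in include/linux/blkdev.h):

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	/* one load from the cached pointer instead of chasing bd_disk */
	return bdev->bd_queue;
}
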
 
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx)
-       __acquires(rcu) __acquires(&bdev->bd_disk->queue->queue_lock)
+       __acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
 {
        struct block_device *bdev;
        struct request_queue *q;
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
 
-       q = bdev->bd_disk->queue;
+       q = bdev_get_queue(bdev);
 
        rcu_read_lock();
        spin_lock_irq(&q->queue_lock);
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-       __releases(&ctx->bdev->bd_disk->queue->queue_lock) __releases(rcu)
+       __releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
 {
-       spin_unlock_irq(&ctx->bdev->bd_disk->queue->queue_lock);
+       spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
        rcu_read_unlock();
        blkdev_put_no_open(ctx->bdev);
 }
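
The sparse annotations above encode the calling convention: blkg_conf_prep()
returns with the RCU read lock and the bdev's queue_lock held, and
blkg_conf_finish() drops both. A hypothetical caller pairing the two
(example_policy and example_apply_config are placeholders, not from this
patch):

static int example_conf_write(struct blkcg *blkcg, char *input)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_policy, input, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is pinned here, under rcu_read_lock() and queue_lock */
	ret = example_apply_config(ctx.blkg, ctx.body);

	blkg_conf_finish(&ctx);	/* releases queue_lock, then the RCU lock */
	return ret;
}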
        while ((dev = class_dev_iter_next(&iter))) {
                struct block_device *bdev = dev_to_bdev(dev);
                struct blkcg_gq *blkg =
-                       blk_queue_root_blkg(bdev->bd_disk->queue);
+                       blk_queue_root_blkg(bdev_get_queue(bdev));
                struct blkg_iostat tmp;
                int cpu;
 
 
        rcu_read_lock();
        blkg = blkg_lookup_create(css_to_blkcg(css),
-                                 bio->bi_bdev->bd_disk->queue);
+                                 bdev_get_queue(bio->bi_bdev));
        while (blkg) {
                if (blkg_tryget(blkg)) {
                        ret_blkg = blkg;
        if (css && css->parent) {
                bio->bi_blkg = blkg_tryget_closest(bio, css);
        } else {
-               blkg_get(bio->bi_bdev->bd_disk->queue->root_blkg);
-               bio->bi_blkg = bio->bi_bdev->bd_disk->queue->root_blkg;
+               blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
+               bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
        }
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
 
         * Success if device supports the encryption context, or if we succeeded
         * in falling back to the crypto API.
         */
-       if (blk_ksm_crypto_cfg_supported(bio->bi_bdev->bd_disk->queue->ksm,
+       if (blk_ksm_crypto_cfg_supported(bdev_get_queue(bio->bi_bdev)->ksm,
                                         &bc_key->crypto_cfg))
                return true;
 
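The comment in this hunk describes a two-step decision; the second step (a
sketch from memory of the surrounding __blk_crypto_bio_prep() code, not part
of this patch) tries the blk-crypto-fallback path before failing the bio:

	if (blk_ksm_crypto_cfg_supported(bdev_get_queue(bio->bi_bdev)->ksm,
					 &bc_key->crypto_cfg))
		return true;	/* the device handles the encryption context */

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;	/* encrypted in software via the crypto API */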
 
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
 
-       ioc = q_to_ioc(bdev->bd_disk->queue);
+       ioc = q_to_ioc(bdev_get_queue(bdev));
        if (!ioc) {
-               ret = blk_iocost_init(bdev->bd_disk->queue);
+               ret = blk_iocost_init(bdev_get_queue(bdev));
                if (ret)
                        goto err;
-               ioc = q_to_ioc(bdev->bd_disk->queue);
+               ioc = q_to_ioc(bdev_get_queue(bdev));
        }
 
        spin_lock_irq(&ioc->lock);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
 
-       ioc = q_to_ioc(bdev->bd_disk->queue);
+       ioc = q_to_ioc(bdev_get_queue(bdev));
        if (!ioc) {
-               ret = blk_iocost_init(bdev->bd_disk->queue);
+               ret = blk_iocost_init(bdev_get_queue(bdev));
                if (ret)
                        goto err;
-               ioc = q_to_ioc(bdev->bd_disk->queue);
+               ioc = q_to_ioc(bdev_get_queue(bdev));
        }
 
        spin_lock_irq(&ioc->lock);
 
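Both iocost hunks now call bdev_get_queue(bdev) three times in a row. A
possible follow-up is to hoist the queue into a local variable, which also
makes the lazy-init idiom easier to see (a sketch, not part of this patch;
the iocost state is only allocated on the first configuration write):

	struct request_queue *q = bdev_get_queue(bdev);
	struct ioc *ioc = q_to_ioc(q);

	if (!ioc) {
		ret = blk_iocost_init(q);
		if (ret)
			goto err;
		ioc = q_to_ioc(q);
	}
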
  */
 void blk_mq_submit_bio(struct bio *bio)
 {
-       struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+       struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        const int is_sync = op_is_sync(bio->bi_opf);
        const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct request *rq;
 
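blk_mq_submit_bio() can rely on bio->bi_bdev being set because submitters
attach the bio to a device with bio_set_dev() first. A hypothetical
submitter, using the two-argument bio_alloc() signature of this kernel
generation (example_read_page is a placeholder, not from this patch):

static void example_read_page(struct block_device *bdev, struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio_set_dev(bio, bdev);			/* sets bio->bi_bdev */
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = 0;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);			/* ends up in blk_mq_submit_bio() */
}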
 
 bool __blk_throtl_bio(struct bio *bio)
 {
-       struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+       struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        struct blkcg_gq *blkg = bio->bi_blkg;
        struct throtl_qnode *qn = NULL;
        struct throtl_grp *tg = blkg_to_tg(blkg);
 
                       struct device_attribute *attr, char *buf)
 {
        struct block_device *bdev = dev_to_bdev(dev);
-       struct request_queue *q = bdev->bd_disk->queue;
+       struct request_queue *q = bdev_get_queue(bdev);
        struct disk_stats stat;
        unsigned int inflight;
 
                           char *buf)
 {
        struct block_device *bdev = dev_to_bdev(dev);
-       struct request_queue *q = bdev->bd_disk->queue;
+       struct request_queue *q = bdev_get_queue(bdev);
        unsigned int inflight[2];
 
        if (queue_is_mq(q))
 
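The inflight fragment above is cut off at the queue_is_mq() test; from
memory (a sketch, not verbatim from this patch), that branch picks between
the two inflight-accounting schemes:

	if (queue_is_mq(q))
		blk_mq_in_flight_rw(q, bdev, inflight);	/* walk in-flight requests */
	else
		part_in_flight_rw(bdev, inflight);	/* sum per-cpu bio counters */
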
        struct block_device *bdev = dev_to_bdev(dev);
 
        return sprintf(buf, "%u\n",
-               queue_limit_alignment_offset(&bdev->bd_disk->queue->limits,
+               queue_limit_alignment_offset(&bdev_get_queue(bdev)->limits,
                                bdev->bd_start_sect));
 }
 
        struct block_device *bdev = dev_to_bdev(dev);
 
        return sprintf(buf, "%u\n",
-               queue_limit_discard_alignment(&bdev->bd_disk->queue->limits,
+               queue_limit_discard_alignment(&bdev_get_queue(bdev)->limits,
                                bdev->bd_start_sect));
 }
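
Both show functions pass bdev->bd_start_sect so the reported value is
relative to where the partition begins rather than to sector 0 of the disk.
queue_limit_alignment_offset() derives, in bytes, the offset of that start
sector from the queue's natural alignment granularity; roughly (a sketch of
the include/linux/blkdev.h helper, from memory):

static inline int queue_limit_alignment_offset(struct queue_limits *lim,
					       sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}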