queue_max_sectors(bdev_get_queue(bdev)));
                return put_ushort(argp, max_sectors);
        case BLKROTATIONAL:
-               return put_ushort(argp, !blk_queue_nonrot(bdev_get_queue(bdev)));
+               return put_ushort(argp, !bdev_nonrot(bdev));
        case BLKRASET:
        case BLKFRASET:
                if(!capable(CAP_SYS_ADMIN))
        /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
        if (file_bdev)
-               nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
+               nonrot = bdev_nonrot(file_bdev);
 
        if (nonrot)
                blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 
 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
 {
-       struct request_queue *q = bdev_get_queue(dev->bdev);
-
-       return !blk_queue_nonrot(q);
+       return !bdev_nonrot(dev->bdev);
 }
 
 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 
                bool nonrot = true;
 
                rdev_for_each(rdev, mddev) {
-                       if (rdev->raid_disk >= 0 &&
-                           !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
+                       if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
                                nonrot = false;
                                break;
                        }
 
                        /* At least two disks to choose from so failfast is OK */
                        set_bit(R1BIO_FailFast, &r1_bio->state);
 
-               nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+               nonrot = bdev_nonrot(rdev->bdev);
                has_nonrot_disk |= nonrot;
                pending = atomic_read(&rdev->nr_pending);
                dist = abs(this_sector - conf->mirrors[disk].head_position);
 
                if (!do_balance)
                        break;
 
-               nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+               nonrot = bdev_nonrot(rdev->bdev);
                has_nonrot_disk |= nonrot;
                pending = atomic_read(&rdev->nr_pending);
                if (min_pending > pending && nonrot) {
 
        rdev_for_each(rdev, mddev) {
                if (test_bit(Journal, &rdev->flags))
                        continue;
-               if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
+               if (bdev_nonrot(rdev->bdev)) {
                        conf->batch_bio_dispatch = false;
                        break;
                }
 
        inode = file->f_mapping->host;
        if (S_ISBLK(inode->i_mode)) {
                struct block_device *bdev = I_BDEV(inode);
-               struct request_queue *q = bdev_get_queue(bdev);
                unsigned long long dev_size;
 
                fd_dev->fd_block_size = bdev_logical_block_size(bdev);
                 */
                dev->dev_attrib.max_write_same_len = 0xFFFF;
 
-               if (blk_queue_nonrot(q))
+               if (bdev_nonrot(bdev))
                        dev->dev_attrib.is_nonrot = 1;
        } else {
                if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
 
        else
                dev->dev_attrib.max_write_same_len = 0xFFFF;
 
-       if (blk_queue_nonrot(q))
+       if (bdev_nonrot(bd))
                dev->dev_attrib.is_nonrot = 1;
 
        bi = bdev_get_integrity(bd);
 
                        set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
        }
 
-       if (!blk_queue_nonrot(bdev_get_queue(bdev)))
+       if (!bdev_nonrot(bdev))
                fs_devices->rotating = true;
 
        device->bdev = bdev;
 
        atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
 
-       if (!blk_queue_nonrot(bdev_get_queue(bdev)))
+       if (!bdev_nonrot(bdev))
                fs_devices->rotating = true;
 
        orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
 
                spin_lock_init(&lg->lg_prealloc_lock);
        }
 
-       if (blk_queue_nonrot(bdev_get_queue(sb->s_bdev)))
+       if (bdev_nonrot(sb->s_bdev))
                sbi->s_mb_max_linear_groups = 0;
        else
                sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
 
        return 0;
 }
 
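+/*
+ * bdev_nonrot - report whether a block device sits on non-rotational media
+ *
+ * Wraps blk_queue_nonrot() so callers no longer need to look up the
+ * request_queue themselves; returns true when QUEUE_FLAG_NONROT is set
+ * on the device's queue (e.g. SSDs), false for rotational devices.
+ */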
+static inline bool bdev_nonrot(struct block_device *bdev)
+{
+       return blk_queue_nonrot(bdev_get_queue(bdev));
+}
+
 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
 {
        struct request_queue *q = bdev_get_queue(bdev);
 
        if (p->flags & SWP_CONTINUED)
                free_swap_count_continuations(p);
 
-       if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
+       if (!p->bdev || !bdev_nonrot(p->bdev))
                atomic_dec(&nr_rotate_swap);
 
        mutex_lock(&swapon_mutex);
        if (p->bdev && p->bdev->bd_disk->fops->rw_page)
                p->flags |= SWP_SYNCHRONOUS_IO;
 
-       if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+       if (p->bdev && bdev_nonrot(p->bdev)) {
                int cpu;
                unsigned long ci, nr_cluster;