static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
 {
-       unsigned int discard_granularity =
-               bdev_get_queue(bdev)->limits.discard_granularity;
+       unsigned int discard_granularity = bdev_discard_granularity(bdev);
        sector_t granularity_aligned_sector;
 
        if (bdev_is_partition(bdev))
                sector += bdev->bd_start_sect;
 
        /* In case the discard granularity isn't set by buggy device driver */
-       if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
+       if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
                char dev_name[BDEVNAME_SIZE];
 
                bdevname(bdev, dev_name);
 
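Pulled out of the hunks above, the shape of the conversion is the same everywhere; a minimal before/after sketch, with bdev standing in for any struct block_device pointer and the variable names purely illustrative:

        /* Before: reach into the queue's limits directly. */
        unsigned int old_way = bdev_get_queue(bdev)->limits.discard_granularity;

        /* After: the same value through the new helper. */
        unsigned int new_way = bdev_discard_granularity(bdev);
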
                               struct drbd_backing_dev *nbc)
 {
        struct block_device *bdev = nbc->backing_bdev;
-       struct request_queue *q = bdev->bd_disk->queue;
 
        if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
                disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
        if (disk_conf->rs_discard_granularity) {
                int orig_value = disk_conf->rs_discard_granularity;
                sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
+               unsigned int discard_granularity = bdev_discard_granularity(bdev);
                int remainder;
 
-               if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
-                       disk_conf->rs_discard_granularity = q->limits.discard_granularity;
+               if (discard_granularity > disk_conf->rs_discard_granularity)
+                       disk_conf->rs_discard_granularity = discard_granularity;
 
-               remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
+               remainder = disk_conf->rs_discard_granularity %
+                               discard_granularity;
                disk_conf->rs_discard_granularity += remainder;
 
                if (disk_conf->rs_discard_granularity > discard_size)
                        disk_conf->rs_discard_granularity = discard_size;
 
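A trace of the clamping above with illustrative numbers (not from the patch), assuming the device reports a 64 KiB discard granularity and the user asked for 32 KiB:

        /* Illustrative trace, values not from the patch: */
        unsigned int discard_granularity = 65536;       /* device reports 64 KiB */
        unsigned int requested = 32768;                 /* user asked for 32 KiB */

        requested = max(requested, discard_granularity);        /* -> 65536 */
        requested += requested % discard_granularity;           /* 65536 % 65536 == 0, unchanged */
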
 int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
 {
        struct block_device *bdev = device->ldev->backing_bdev;
-       struct request_queue *q = bdev_get_queue(bdev);
        sector_t tmp, nr;
        unsigned int max_discard_sectors, granularity;
        int alignment;
        if ((flags & EE_ZEROOUT) || !(flags & EE_TRIM))
                goto zero_out;
 
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
-       granularity = max(q->limits.discard_granularity >> 9, 1U);
+       granularity = max(bdev_discard_granularity(bdev) >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
        max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22));
 
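To make the sector arithmetic concrete, a hedged example assuming a 4 KiB device discard granularity and a zero discard alignment offset; the two computations above then evaluate to:

        /* Assumed: 4 KiB discard granularity, zero discard alignment. */
        unsigned int granularity = max(4096U >> 9, 1U); /* 8 sectors of 512 bytes */
        int alignment = (0 >> 9) % granularity;         /* 0: naturally aligned */
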
                struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
 
                max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
-               granularity = backingq->limits.discard_granularity ?:
+               granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
                        queue_physical_block_size(backingq);
 
        /*
 
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
                                       struct block_device *bdev)
 {
-       struct request_queue *q = bdev_get_queue(bdev);
        int block_size = bdev_logical_block_size(bdev);
 
        if (!bdev_max_discard_sectors(bdev))
                return false;
         * Currently hardcoded to 1 in Linux/SCSI code..
         */
        attrib->max_unmap_block_desc_count = 1;
-       attrib->unmap_granularity = q->limits.discard_granularity / block_size;
+       attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
        attrib->unmap_granularity_alignment =
                bdev_discard_alignment(bdev) / block_size;
        return true;
 
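As a worked example with assumed values (not from the patch): with 512-byte logical blocks, a 4 KiB device discard granularity, and a zero discard alignment, the attributes above come out as:

        /* Assumed: 512-byte logical blocks, 4 KiB granularity, zero alignment. */
        attrib->unmap_granularity = 4096 / 512;                 /* 8 logical blocks */
        attrib->unmap_granularity_alignment = 0 / 512;          /* 0 */
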
                                        void __user *arg)
 {
        struct btrfs_device *device;
-       struct request_queue *q;
        struct fstrim_range range;
        u64 minlen = ULLONG_MAX;
        u64 num_devices = 0;
        rcu_read_lock();
        list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
                                dev_list) {
-               if (!device->bdev)
+               if (!device->bdev || !bdev_max_discard_sectors(device->bdev))
                        continue;
-               q = bdev_get_queue(device->bdev);
-               if (bdev_max_discard_sectors(device->bdev)) {
-                       num_devices++;
-                       minlen = min_t(u64, q->limits.discard_granularity,
-                                    minlen);
-               }
+               num_devices++;
+               minlen = min_t(u64, bdev_discard_granularity(device->bdev),
+                                   minlen);
        }
        rcu_read_unlock();
 
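The loop keeps the smallest discard granularity across all present devices; a hedged trace assuming two devices that report 64 KiB and 4 KiB respectively:

        /* minlen starts at ULLONG_MAX; each device can only lower it. */
        minlen = min_t(u64, 65536, ULLONG_MAX);  /* first device:  65536 */
        minlen = min_t(u64, 4096, minlen);       /* second device: 4096 */
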
 static int exfat_ioctl_fitrim(struct inode *inode, unsigned long arg)
 {
-       struct request_queue *q = bdev_get_queue(inode->i_sb->s_bdev);
        struct fstrim_range range;
        int ret = 0;
 
                return -EFAULT;
 
        range.minlen = max_t(unsigned int, range.minlen,
-                               q->limits.discard_granularity);
+                               bdev_discard_granularity(inode->i_sb->s_bdev));
 
        ret = exfat_trim_fs(inode, &range);
        if (ret < 0)
 
  */
 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 {
-       struct request_queue *q = bdev_get_queue(sb->s_bdev);
+       unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
        struct ext4_group_info *grp;
        ext4_group_t group, first_group, last_group;
        ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
            range->len < sb->s_blocksize)
                return -EINVAL;
        /* No point to try to trim less than discard granularity */
-       if (range->minlen < q->limits.discard_granularity) {
+       if (range->minlen < discard_granularity) {
                minlen = EXT4_NUM_B2C(EXT4_SB(sb),
-                       q->limits.discard_granularity >> sb->s_blocksize_bits);
+                               discard_granularity >> sb->s_blocksize_bits);
                if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
                        goto out;
        }
 
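For scale, assume 4 KiB filesystem blocks (s_blocksize_bits == 12), one block per cluster (no bigalloc), and a 1 MiB device discard granularity; the conversion above then yields:

        /* 1048576 >> 12 == 256 blocks, i.e. 256 clusters without bigalloc. */
        minlen = EXT4_NUM_B2C(EXT4_SB(sb), 1048576 >> 12);
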
 {
        struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
-       struct request_queue *q = bdev_get_queue(sb->s_bdev);
        struct fstrim_range range;
        int ret;
 
                return ret;
 
        range.minlen = max((unsigned int)range.minlen,
-                               q->limits.discard_granularity);
+                          bdev_discard_granularity(sb->s_bdev));
        ret = f2fs_trim_fs(F2FS_SB(sb), &range);
        mnt_drop_write_file(filp);
        if (ret < 0)
 
        struct super_block *sb = inode->i_sb;
        struct fstrim_range __user *user_range;
        struct fstrim_range range;
-       struct request_queue *q = bdev_get_queue(sb->s_bdev);
        int err;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EFAULT;
 
        range.minlen = max_t(unsigned int, range.minlen,
-                            q->limits.discard_granularity);
+                            bdev_discard_granularity(sb->s_bdev));
 
        err = fat_trim_fs(inode, &range);
        if (err < 0)
 
 {
        struct inode *inode = file_inode(filp);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
-       struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
+       struct block_device *bdev = sdp->sd_vfs->s_bdev;
        struct buffer_head *bh;
        struct gfs2_rgrpd *rgd;
        struct gfs2_rgrpd *rgd_end;
        if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
                return -EROFS;
 
-       if (!bdev_max_discard_sectors(sdp->sd_vfs->s_bdev))
+       if (!bdev_max_discard_sectors(bdev))
                return -EOPNOTSUPP;
 
        if (copy_from_user(&r, argp, sizeof(r)))
                return -EFAULT;
        start = r.start >> bs_shift;
        end = start + (r.len >> bs_shift);
        minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
-       minlen = max_t(u64, minlen,
-                      q->limits.discard_granularity) >> bs_shift;
+       minlen = max_t(u64, minlen, bdev_discard_granularity(bdev)) >> bs_shift;
 
        if (end <= start || minlen > sdp->sd_max_rg_data)
                return -EINVAL;
 
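With assumed values (4 KiB filesystem blocks so bs_shift == 12, a zero user minlen, and a 64 KiB device granularity), the two max_t() steps above reduce to:

        minlen = max_t(u64, 0, 4096);            /* block size wins over r.minlen */
        minlen = max_t(u64, 4096, 65536) >> 12;  /* = 16 filesystem blocks */
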
        case FITRIM:
        {
                struct super_block *sb = inode->i_sb;
-               struct request_queue *q = bdev_get_queue(sb->s_bdev);
                struct fstrim_range range;
                s64 ret = 0;
 
                        return -EFAULT;
 
                range.minlen = max_t(unsigned int, range.minlen,
-                       q->limits.discard_granularity);
+                                    bdev_discard_granularity(sb->s_bdev));
 
                ret = jfs_ioc_trim(inode, &range);
                if (ret < 0)
 
 static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp)
 {
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
-       struct request_queue *q = bdev_get_queue(nilfs->ns_bdev);
        struct fstrim_range range;
        int ret;
 
        if (copy_from_user(&range, argp, sizeof(range)))
                return -EFAULT;
 
-       range.minlen = max_t(u64, range.minlen, q->limits.discard_granularity);
+       range.minlen = max_t(u64, range.minlen,
+                            bdev_discard_granularity(nilfs->ns_bdev));
 
        down_read(&nilfs->ns_segctor_sem);
        ret = nilfs_sufile_trim_fs(nilfs->ns_sufile, &range);
 
 {
        struct fstrim_range __user *user_range;
        struct fstrim_range range;
-       struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
        int err;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&range, user_range, sizeof(range)))
                return -EFAULT;
 
-       range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);
+       range.minlen = max_t(u32, range.minlen,
+                            bdev_discard_granularity(sbi->sb->s_bdev));
 
        err = ntfs_trim_fs(sbi, &range);
        if (err < 0)
 
        int err;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct block_device *bdev = sb->s_bdev;
-       struct request_queue *rq;
        struct inode *inode;
        struct ntfs_inode *ni;
        size_t i, tt;
                goto out;
        }
 
-       rq = bdev_get_queue(bdev);
-       if (bdev_max_discard_sectors(bdev) && rq->limits.discard_granularity) {
-               sbi->discard_granularity = rq->limits.discard_granularity;
+       if (bdev_max_discard_sectors(bdev) && bdev_discard_granularity(bdev)) {
+               sbi->discard_granularity = bdev_discard_granularity(bdev);
                sbi->discard_granularity_mask_inv =
                        ~(u64)(sbi->discard_granularity - 1);
        }
 
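The inverted mask stored above works as an alignment mask only for a power-of-two granularity, which is assumed here; e.g. with a 64 KiB granularity:

        sbi->discard_granularity = 65536;
        sbi->discard_granularity_mask_inv = ~(u64)(65536 - 1);  /* ~0xffff */
        /* offset & mask_inv then rounds down to a 64 KiB boundary. */
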
        case FITRIM:
        {
                struct super_block *sb = inode->i_sb;
-               struct request_queue *q = bdev_get_queue(sb->s_bdev);
                struct fstrim_range range;
                int ret = 0;
 
                if (copy_from_user(&range, argp, sizeof(range)))
                        return -EFAULT;
 
-               range.minlen = max_t(u64, q->limits.discard_granularity,
+               range.minlen = max_t(u64, bdev_discard_granularity(sb->s_bdev),
                                     range.minlen);
                ret = ocfs2_trim_fs(sb, &range);
                if (ret < 0)
 
        struct xfs_mount                *mp,
        struct fstrim_range __user      *urange)
 {
-       struct request_queue    *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
-       unsigned int            granularity = q->limits.discard_granularity;
+       unsigned int            granularity =
+               bdev_discard_granularity(mp->m_ddev_targp->bt_bdev);
        struct fstrim_range     range;
        xfs_daddr_t             start, end, minlen;
        xfs_agnumber_t          start_agno, end_agno, agno;
 
        return bdev_get_queue(bdev)->limits.max_discard_sectors;
 }
 
+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
+{
+       return bdev_get_queue(bdev)->limits.discard_granularity;
+}
+
 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 {
        struct request_queue *q = bdev_get_queue(bdev);
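
Finally, a hedged usage sketch of the new helper from hypothetical caller code; the round_down() call and the nr_bytes variable are illustrative, not from this patch, and round_down() itself assumes a power-of-two granularity:

        unsigned int granularity = bdev_discard_granularity(bdev);

        /* A granularity of zero means the driver never set one up. */
        if (granularity)
                nr_bytes = round_down(nr_bytes, granularity);   /* power-of-two assumed */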