                        if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
                                blk_queue_max_discard_sectors(io_req->req->q, 0);
                                blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
-                               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
                        }
                        blk_mq_end_request(io_req->req, io_req->error);
                        kfree(io_req);
                ubd_dev->queue->limits.discard_alignment = SECTOR_SIZE;
                blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
                blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, ubd_dev->queue);
        }
        blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue);
        return 0;
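
Throughout this series the test for discard support changes from the
QUEUE_FLAG_DISCARD queue flag to a non-zero max_discard_sectors limit.
For reference, the helper every conversion below relies on is a thin
wrapper around that limit, added earlier in this series in
include/linux/blkdev.h:

        static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
        {
                return bdev_get_queue(bdev)->limits.max_discard_sectors;
        }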
 
 
        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
-               if (!blk_queue_discard(q))
+               if (!bdev_max_discard_sectors(bdev))
                        goto not_supported;
                break;
        case REQ_OP_SECURE_ERASE:
 
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
-               if (!blk_queue_discard(q))
+               if (!bdev_max_discard_sectors(bdev))
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }
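
With the capability check folded into the limit, callers of
blkdev_issue_discard() no longer need a separate flag test. A sketch of
a caller (my_trim_range is hypothetical, not part of this patch; the
five-argument signature is the one still in place at this point in the
series, before secure erase is decoupled):

        /* 0 means "no discard support" -- the same test the hunks above use */
        static int my_trim_range(struct block_device *bdev, sector_t sect,
                                 sector_t nr_sects)
        {
                if (!bdev_max_discard_sectors(bdev))
                        return -EOPNOTSUPP;
                return blkdev_issue_discard(bdev, sect, nr_sects, GFP_KERNEL, 0);
        }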
 
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
-       QUEUE_FLAG_NAME(DISCARD),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
        QUEUE_FLAG_NAME(SECERASE),
 
 {
        uint64_t range[2];
        uint64_t start, len;
-       struct request_queue *q = bdev_get_queue(bdev);
        struct inode *inode = bdev->bd_inode;
        int err;
 
        if (!(mode & FMODE_WRITE))
                return -EBADF;
 
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(bdev))
                return -EOPNOTSUPP;
 
        if (copy_from_user(range, (void __user *)arg, sizeof(range)))
 
                        cpu_to_be32(bdev_alignment_offset(bdev));
                p->qlim->io_min = cpu_to_be32(bdev_io_min(bdev));
                p->qlim->io_opt = cpu_to_be32(bdev_io_opt(bdev));
-               p->qlim->discard_enabled = blk_queue_discard(q);
+               p->qlim->discard_enabled = !!bdev_max_discard_sectors(bdev);
                put_ldev(device);
        } else {
                struct request_queue *q = device->rq_queue;
 
                first_peer_device(device)->connection;
        struct request_queue *q = device->rq_queue;
 
-       if (bdev && !blk_queue_discard(bdev->backing_bdev->bd_disk->queue))
+       if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
                goto not_supported;
 
        if (connection->cstate >= C_CONNECTED &&
         */
        blk_queue_discard_granularity(q, 512);
        q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        q->limits.max_write_zeroes_sectors =
                drbd_max_discard_sectors(connection);
        return;
 
 not_supported:
-       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
        blk_queue_discard_granularity(q, 0);
        q->limits.max_discard_sectors = 0;
        q->limits.max_write_zeroes_sectors = 0;
 }
 
-static void fixup_discard_if_not_supported(struct request_queue *q)
-{
-       /* To avoid confusion, if this queue does not support discard, clear
-        * max_discard_sectors, which is what lsblk -D reports to the user.
-        * Older kernels got this wrong in "stack limits".
-        * */
-       if (!blk_queue_discard(q)) {
-               blk_queue_max_discard_sectors(q, 0);
-               blk_queue_discard_granularity(q, 0);
-       }
-}
-
 static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
 {
        /* Fixup max_write_zeroes_sectors after blk_stack_limits():
                blk_stack_limits(&q->limits, &b->limits, 0);
                disk_update_readahead(device->vdisk);
        }
-       fixup_discard_if_not_supported(q);
        fixup_write_zeroes(device, q);
 }
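
The deleted fixup noted that max_discard_sectors "is what lsblk -D
reports to the user"; with the flag gone, the limit is the single source
of truth, so there is nothing left to fix up. A userspace sketch reading
the same value through sysfs (standard attribute; the helper name is
illustrative):

        #include <stdio.h>

        /* discard_max_bytes is max_discard_sectors << 9; zero now means
         * exactly "no discard support". */
        static long discard_max_bytes(const char *disk)
        {
                char path[128];
                long val = -1;
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/block/%s/queue/discard_max_bytes", disk);
                f = fopen(path, "r");
                if (!f)
                        return -1;
                if (fscanf(f, "%ld", &val) != 1)
                        val = -1;
                fclose(f);
                return val;
        }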
 
        if (disk_conf->al_extents > drbd_al_extents_max(nbc))
                disk_conf->al_extents = drbd_al_extents_max(nbc);
 
-       if (!blk_queue_discard(q)) {
+       if (!bdev_max_discard_sectors(bdev)) {
                if (disk_conf->rs_discard_granularity) {
                        disk_conf->rs_discard_granularity = 0; /* disable feature */
                        drbd_info(device, "rs_discard_granularity feature disabled\n");
 
 
 static bool can_do_reliable_discards(struct drbd_device *device)
 {
-       struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
        struct disk_conf *dc;
        bool can_do;
 
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(device->ldev->backing_bdev))
                return false;
 
        rcu_read_lock();
 
 
        mode |= FALLOC_FL_KEEP_SIZE;
 
-       if (!blk_queue_discard(lo->lo_queue)) {
-               ret = -EOPNOTSUPP;
-               goto out;
-       }
+       if (!bdev_max_discard_sectors(lo->lo_device))
+               return -EOPNOTSUPP;
 
        ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
        if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
-               ret = -EIO;
- out:
+               return -EIO;
        return ret;
 }
 
                q->limits.discard_granularity = granularity;
                blk_queue_max_discard_sectors(q, max_discard_sectors);
                blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        } else {
                q->limits.discard_granularity = 0;
                blk_queue_max_discard_sectors(q, 0);
                blk_queue_max_write_zeroes_sectors(q, 0);
-               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
        }
        q->limits.discard_alignment = 0;
 }
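
Enabling and disabling now follow a single idiom: advertise discard by
setting a non-zero limit, withdraw it by zeroing the limit. Note that
blk_queue_max_discard_sectors() updates both the stacked and the
hardware limit (definition from block/blk-settings.c, for reference),
which is why zeroing it fully disables the feature:

        void blk_queue_max_discard_sectors(struct request_queue *q,
                        unsigned int max_discard_sectors)
        {
                q->limits.max_hw_discard_sectors = max_discard_sectors;
                q->limits.max_discard_sectors = max_discard_sectors;
        }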
 
                set_disk_ro(nbd->disk, true);
        else
                set_disk_ro(nbd->disk, false);
-       if (config->flags & NBD_FLAG_SEND_TRIM)
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
        if (config->flags & NBD_FLAG_SEND_FLUSH) {
                if (config->flags & NBD_FLAG_SEND_FUA)
                        blk_queue_write_cache(nbd->disk->queue, true, true);
                nbd->tag_set.timeout = 0;
                nbd->disk->queue->limits.discard_granularity = 0;
                nbd->disk->queue->limits.discard_alignment = 0;
-               blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
-               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+               blk_queue_max_discard_sectors(nbd->disk->queue, 0);
 
                mutex_unlock(&nbd->config_lock);
                nbd_put(nbd);
 
        nullb->q->limits.discard_granularity = nullb->dev->blocksize;
        nullb->q->limits.discard_alignment = nullb->dev->blocksize;
        blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
 }
 
 static const struct block_device_operations null_bio_ops = {
 
        blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
 
        if (rbd_dev->opts->trim) {
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
                q->limits.discard_granularity = rbd_dev->opts->alloc_size;
                blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
                blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
 
        blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
        dev->queue->limits.discard_granularity  = dev->discard_granularity;
        dev->queue->limits.discard_alignment    = dev->discard_alignment;
-       if (dev->max_discard_sectors)
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
        if (dev->secure_discard)
                blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);
 
 
 
 static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
 {
-       if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
-               return 0;
-
        return bdev_max_discard_sectors(dev->bdev);
 }
 
 
                        v = sg_elems;
                blk_queue_max_discard_segments(q,
                                               min(v, MAX_DISCARD_SEGMENTS));
-
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }
 
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
 
        if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
                return;
 
-       if (blk_queue_discard(q)) {
+       if (bdev_max_discard_sectors(bdev)) {
                err = xenbus_printf(xbt, dev->nodename,
                        "discard-granularity", "%u",
                        q->limits.discard_granularity);
 
        blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
 
        if (info->feature_discard) {
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
                blk_queue_max_discard_sectors(rq, get_capacity(gd));
                rq->limits.discard_granularity = info->discard_granularity ?:
                                                 info->physical_sector_size;
                                blkif_req(req)->error = BLK_STS_NOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
-                               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+                               blk_queue_max_discard_sectors(rq, 0);
                                blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
                        }
                        break;
 
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
 
        /*
         * zram_bio_discard() will clear all logical blocks if logical block
 
                bio_get(s->iop.bio);
 
                if (bio_op(bio) == REQ_OP_DISCARD &&
-                   !blk_queue_discard(bdev_get_queue(dc->bdev)))
+                   !bdev_max_discard_sectors(dc->bdev))
                        goto insert_data;
 
                /* I/O request sent to backing device */
        bio->bi_private = ddip;
 
        if ((bio_op(bio) == REQ_OP_DISCARD) &&
-           !blk_queue_discard(bdev_get_queue(dc->bdev)))
+           !bdev_max_discard_sectors(dc->bdev))
                bio->bi_end_io(bio);
        else
                submit_bio_noacct(bio);
 
 
        blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
 
        blk_queue_write_cache(q, true, true);
 
        ca->bdev->bd_holder = ca;
        ca->sb_disk = sb_disk;
 
-       if (blk_queue_discard(bdev_get_queue(bdev)))
+       if (bdev_max_discard_sectors(bdev))
                ca->discard = CACHE_DISCARD(&ca->sb);
 
        ret = cache_alloc(ca);
 
        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);
 
-               if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+               if (bdev_max_discard_sectors(ca->bdev))
                        ca->discard = v;
 
                if (v != CACHE_DISCARD(&ca->sb)) {
 
        return r;
 }
 
-static bool origin_dev_supports_discard(struct block_device *origin_bdev)
-{
-       struct request_queue *q = bdev_get_queue(origin_bdev);
-
-       return blk_queue_discard(q);
-}
-
 /*
  * If discard_passdown was enabled verify that the origin device
  * supports discards.  Disable discard_passdown if not.
        if (!cache->features.discard_passdown)
                return;
 
-       if (!origin_dev_supports_discard(origin_bdev))
+       if (!bdev_max_discard_sectors(origin_bdev))
                reason = "discard unsupported";
 
        else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
 
        do_waker(&clone->waker.work);
 }
 
-static bool bdev_supports_discards(struct block_device *bdev)
-{
-       struct request_queue *q = bdev_get_queue(bdev);
-
-       return (q && blk_queue_discard(q));
-}
-
 /*
  * If discard_passdown was enabled verify that the destination device supports
  * discards. Disable discard_passdown if not.
        if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
                return;
 
-       if (!bdev_supports_discards(dest_dev))
+       if (!bdev_max_discard_sectors(dest_dev))
                reason = "discard unsupported";
        else if (dest_limits->max_discard_sectors < clone->region_size)
                reason = "max discard sectors smaller than a region";
 
 static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
        struct log_writes_c *lc = ti->private;
-       struct request_queue *q = bdev_get_queue(lc->dev->bdev);
 
-       if (!q || !blk_queue_discard(q)) {
+       if (!bdev_max_discard_sectors(lc->dev->bdev)) {
                lc->device_supports_discard = false;
                limits->discard_granularity = lc->sectorsize;
                limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
 
        raid456 = rs_is_raid456(rs);
 
        for (i = 0; i < rs->raid_disks; i++) {
-               struct request_queue *q;
-
-               if (!rs->dev[i].rdev.bdev)
-                       continue;
-
-               q = bdev_get_queue(rs->dev[i].rdev.bdev);
-               if (!q || !blk_queue_discard(q))
+               if (!rs->dev[i].rdev.bdev ||
+                   !bdev_max_discard_sectors(rs->dev[i].rdev.bdev))
                        return;
 
                if (raid456) {
 
 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
                                      sector_t start, sector_t len, void *data)
 {
-       struct request_queue *q = bdev_get_queue(dev->bdev);
-
-       return !blk_queue_discard(q);
+       return !bdev_max_discard_sectors(dev->bdev);
 }
 
 static bool dm_table_supports_discards(struct dm_table *t)
                blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
 
        if (!dm_table_supports_discards(t)) {
-               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
-               /* Must also clear discard limits... */
                q->limits.max_discard_sectors = 0;
                q->limits.max_hw_discard_sectors = 0;
                q->limits.discard_granularity = 0;
                q->limits.discard_alignment = 0;
                q->limits.discard_misaligned = 0;
-       } else
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+       }
 
        if (dm_table_supports_secure_erase(t))
                blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
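
dm clears the discard limits field by field here, including
max_hw_discard_sectors, since blk_stack_limits() may have propagated
non-zero values from the underlying devices; with the flag gone, zeroed
limits are the only way to signal "no discard support" up the stack.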
 
 /*----------------------------------------------------------------
  * Binding of control targets to a pool object
  *--------------------------------------------------------------*/
-static bool data_dev_supports_discard(struct pool_c *pt)
-{
-       struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
-       return blk_queue_discard(q);
-}
-
 static bool is_factor(sector_t block_size, uint32_t n)
 {
        return !sector_div(block_size, n);
        if (!pt->adjusted_pf.discard_passdown)
                return;
 
-       if (!data_dev_supports_discard(pt))
+       if (!bdev_max_discard_sectors(pt->data_dev->bdev))
                reason = "discard unsupported";
 
        else if (data_limits->max_discard_sectors < pool->sectors_per_block)
                /*
                 * Must explicitly disallow stacking discard limits otherwise the
                 * block layer will stack them if pool's data device has support.
-                * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
-                * user to see that, so make sure to set all discard limits to 0.
                 */
                limits->discard_granularity = 0;
                return;
 
 
        /* device doesn't really support DISCARD, disable it */
        limits->max_discard_sectors = 0;
-       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
 }
 
 void disable_write_zeroes(struct mapped_device *md)
 
        if (unlikely(error == BLK_STS_TARGET)) {
                if (bio_op(bio) == REQ_OP_DISCARD &&
-                   !q->limits.max_discard_sectors)
+                   !bdev_max_discard_sectors(bio->bi_bdev))
                        disable_discard(md);
                else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
                         !q->limits.max_write_zeroes_sectors)
 
        struct linear_conf *conf;
        struct md_rdev *rdev;
        int i, cnt;
-       bool discard_supported = false;
 
        conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
        if (!conf)
 
                conf->array_sectors += rdev->sectors;
                cnt++;
-
-               if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-                       discard_supported = true;
        }
        if (cnt != raid_disks) {
                pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
                goto out;
        }
 
-       if (!discard_supported)
-               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
-       else
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
-
        /*
         * Here we calculate the device offsets.
         */
                start_sector + data_offset;
 
        if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-                    !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) {
+                    !bdev_max_discard_sectors(bio->bi_bdev))) {
                /* Just ignore it */
                bio_endio(bio);
        } else {
 
        conf = mddev->private;
        if (mddev->queue) {
                struct md_rdev *rdev;
-               bool discard_supported = false;
 
                blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
                rdev_for_each(rdev, mddev) {
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
-                       if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-                               discard_supported = true;
                }
-               if (!discard_supported)
-                       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
-               else
-                       blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
        }
 
        /* calculate array device size */
 
                if (test_bit(Faulty, &rdev->flags)) {
                        bio_io_error(bio);
                } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-                                   !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
+                                   !bdev_max_discard_sectors(bio->bi_bdev)))
                        /* Just ignore it */
                        bio_endio(bio);
                else
                        break;
                }
        }
-       if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
        print_conf(conf);
        return err;
 }
        int i;
        struct md_rdev *rdev;
        int ret;
-       bool discard_supported = false;
 
        if (mddev->level != 1) {
                pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
                        continue;
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
-               if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-                       discard_supported = true;
        }
 
        mddev->degraded = 0;
 
        md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
-       if (mddev->queue) {
-               if (discard_supported)
-                       blk_queue_flag_set(QUEUE_FLAG_DISCARD,
-                                               mddev->queue);
-               else
-                       blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
-                                                 mddev->queue);
-       }
-
        ret = md_integrity_register(mddev);
        if (ret) {
                md_unregister_thread(&mddev->thread);
 
                        if (test_bit(Faulty, &rdev->flags)) {
                                bio_io_error(bio);
                        } else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
-                                           !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
+                                           !bdev_max_discard_sectors(bio->bi_bdev)))
                                /* Just ignore it */
                                bio_endio(bio);
                        else
                if (test_bit(Faulty, &rdev->flags)) {
                        bio_io_error(bio);
                } else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
-                                   !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
+                                   !bdev_max_discard_sectors(bio->bi_bdev)))
                        /* Just ignore it */
                        bio_endio(bio);
                else
                rcu_assign_pointer(p->rdev, rdev);
                break;
        }
-       if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 
        print_conf(conf);
        return err;
        sector_t size;
        sector_t min_offset_diff = 0;
        int first = 1;
-       bool discard_supported = false;
 
        if (mddev_init_writes_pending(mddev) < 0)
                return -ENOMEM;
                                          rdev->data_offset << 9);
 
                disk->head_position = 0;
-
-               if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-                       discard_supported = true;
                first = 0;
        }
 
-       if (mddev->queue) {
-               if (discard_supported)
-                       blk_queue_flag_set(QUEUE_FLAG_DISCARD,
-                                               mddev->queue);
-               else
-                       blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
-                                                 mddev->queue);
-       }
        /* need to check that every block has at least one working mirror */
        if (!enough(conf, -1)) {
                pr_err("md/raid10:%s: not enough operational mirrors.\n",
 
 
        r5l_write_super(log, end);
 
-       if (!blk_queue_discard(bdev_get_queue(bdev)))
+       if (!bdev_max_discard_sectors(bdev))
                return;
 
        mddev = log->rdev->mddev;
 
                 * A better idea might be to turn DISCARD into WRITE_ZEROES
                 * requests, as that is required to be safe.
                 */
-               if (devices_handle_discard_safely &&
-                   mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
-                   mddev->queue->limits.discard_granularity >= stripe)
-                       blk_queue_flag_set(QUEUE_FLAG_DISCARD,
-                                               mddev->queue);
-               else
-                       blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
-                                               mddev->queue);
+               if (!devices_handle_discard_safely ||
+                   mddev->queue->limits.max_discard_sectors < (stripe >> 9) ||
+                   mddev->queue->limits.discard_granularity < stripe)
+                       blk_queue_max_discard_sectors(mddev->queue, 0);
 
                blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
        }
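
The raid5 conversion inverts the old condition instead of mirroring it:
blk_stack_limits() has already propagated a non-zero max_discard_sectors
from the component devices, so only the unsafe case needs handling, and
zeroing the limit replaces clearing the flag. The new test is the De
Morgan negation of the old one (!A || !B || !C versus A && B && C), so
behavior is unchanged.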
 
        if (!max_discard)
                return;
 
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
 
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
 
        if (tr->discard) {
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
                new->rq->limits.discard_granularity = tr->blksize;
        }
 
        u32 size = queue_logical_block_size(queue);
 
        if (ctrl->max_discard_sectors == 0) {
-               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
+               blk_queue_max_discard_sectors(queue, 0);
                return;
        }
 
        queue->limits.discard_granularity = size;
 
        /* If discard is already enabled, don't reset queue limits */
-       if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
+       if (queue->limits.max_discard_sectors)
                return;
 
        blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
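
In the nvme hunk, blk_queue_flag_test_and_set() previously doubled as
the "already configured" check. Reading max_discard_sectors directly
preserves that short-circuit: a non-zero limit now means discard was
already set up, matching the comment above the test.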
 
 
        blk_queue_max_discard_sectors(q, max_discard_sectors);
        blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
 static int dasd_fba_pe_handler(struct dasd_device *device,
 
        case SD_LBP_FULL:
        case SD_LBP_DISABLE:
                blk_queue_max_discard_sectors(q, 0);
-               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
                return;
 
        case SD_LBP_UNMAP:
        }
 
        blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
 static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
 
        struct request_queue *q = bdev_get_queue(bdev);
        int block_size = bdev_logical_block_size(bdev);
 
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(bdev))
                return false;
 
        attrib->max_unmap_lba_count =
 
                ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
                                              &discarded);
                discarded += src_disc;
-       } else if (blk_queue_discard(bdev_get_queue(stripe->dev->bdev))) {
+       } else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
                ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
        } else {
                ret = 0;
        *trimmed = 0;
 
        /* Discard not supported = nothing to do. */
-       if (!blk_queue_discard(bdev_get_queue(device->bdev)))
+       if (!bdev_max_discard_sectors(device->bdev))
                return 0;
 
        /* Not writable = nothing to do. */
 
                if (!device->bdev)
                        continue;
                q = bdev_get_queue(device->bdev);
-               if (blk_queue_discard(q)) {
+               if (bdev_max_discard_sectors(device->bdev)) {
                        num_devices++;
                        minlen = min_t(u64, q->limits.discard_granularity,
                                     minlen);
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(inode->i_sb->s_bdev))
                return -EOPNOTSUPP;
 
        if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range)))
 
        if (opts->allow_utime == (unsigned short)-1)
                opts->allow_utime = ~opts->fs_dmask & 0022;
 
-       if (opts->discard) {
-               struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
-               if (!blk_queue_discard(q)) {
-                       exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard");
-                       opts->discard = 0;
-               }
+       if (opts->discard && !bdev_max_discard_sectors(sb->s_bdev)) {
+               exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard");
+               opts->discard = 0;
        }
 
        sb->s_flags |= SB_NODIRATIME;
 
        __u32 flags = 0;
        unsigned int flush_flags = 0;
        struct super_block *sb = file_inode(filp)->i_sb;
-       struct request_queue *q;
 
        if (copy_from_user(&flags, (__u32 __user *)arg,
                                sizeof(__u32)))
        if (flags & ~EXT4_IOC_CHECKPOINT_FLAG_VALID)
                return -EINVAL;
 
-       q = bdev_get_queue(EXT4_SB(sb)->s_journal->j_dev);
-       if (!q)
-               return -ENXIO;
-       if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) && !blk_queue_discard(q))
+       if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
+           !bdev_max_discard_sectors(EXT4_SB(sb)->s_journal->j_dev))
                return -EOPNOTSUPP;
 
        if (flags & EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
 
        case FITRIM:
        {
-               struct request_queue *q = bdev_get_queue(sb->s_bdev);
                struct fstrim_range range;
                int ret = 0;
 
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
 
-               if (!blk_queue_discard(q))
+               if (!bdev_max_discard_sectors(sb->s_bdev))
                        return -EOPNOTSUPP;
 
                /*
 
                        goto failed_mount9;
        }
 
-       if (test_opt(sb, DISCARD)) {
-               struct request_queue *q = bdev_get_queue(sb->s_bdev);
-               if (!blk_queue_discard(q))
-                       ext4_msg(sb, KERN_WARNING,
-                                "mounting with \"discard\" option, but "
-                                "the device does not support discard");
-       }
+       if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev))
+               ext4_msg(sb, KERN_WARNING,
+                        "mounting with \"discard\" option, but the device does not support discard");
 
        if (es->s_error_count)
                mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
 
 
 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
 {
-       return blk_queue_discard(bdev_get_queue(bdev)) ||
-              bdev_is_zoned(bdev);
+       return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
 }
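
f2fs counts zoned devices as discard-capable even when the discard limit
is zero, since it issues zone resets in place of discards there; the
helper therefore checks both conditions.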
 
 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(sb->s_bdev))
                return -EOPNOTSUPP;
 
        user_range = (struct fstrim_range __user *)arg;
 
                goto out_fail;
        }
 
-       if (sbi->options.discard) {
-               struct request_queue *q = bdev_get_queue(sb->s_bdev);
-               if (!blk_queue_discard(q))
-                       fat_msg(sb, KERN_WARNING,
-                                       "mounting with \"discard\" option, but "
-                                       "the device does not support discard");
-       }
+       if (sbi->options.discard && !bdev_max_discard_sectors(sb->s_bdev))
+               fat_msg(sb, KERN_WARNING,
+                       "mounting with \"discard\" option, but the device does not support discard");
 
        fat_set_state(sb, 1, 0);
        return 0;
 
        if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
                return -EROFS;
 
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(sdp->sd_vfs->s_bdev))
                return -EOPNOTSUPP;
 
        if (copy_from_user(&r, argp, sizeof(r)))
 
        unsigned long block, log_offset; /* logical */
        unsigned long long phys_block, block_start, block_stop; /* physical */
        loff_t byte_start, byte_stop, byte_count;
-       struct request_queue *q = bdev_get_queue(journal->j_dev);
 
        /* flags must be set to either discard or zeroout */
        if ((flags & ~JBD2_JOURNAL_FLUSH_VALID) || !flags ||
            ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
                        (flags & JBD2_JOURNAL_FLUSH_ZEROOUT)))
                return -EINVAL;
 
-       if (!q)
-               return -ENXIO;
-
-       if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) && !blk_queue_discard(q))
+       if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
+           !bdev_max_discard_sectors(journal->j_dev))
                return -EOPNOTSUPP;
 
        /*
 
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
 
-               if (!blk_queue_discard(q)) {
+               if (!bdev_max_discard_sectors(sb->s_bdev)) {
                        jfs_warn("FITRIM not supported on device");
                        return -EOPNOTSUPP;
                }
 
                }
 
                case Opt_discard:
-               {
-                       struct request_queue *q = bdev_get_queue(sb->s_bdev);
                        /* if set to 1, even copying files will cause
                         * trimming :O
                         * -> user has more control over the online trimming
                         */
                        sbi->minblks_trim = 64;
-                       if (blk_queue_discard(q))
+                       if (bdev_max_discard_sectors(sb->s_bdev))
                                *flag |= JFS_DISCARD;
                        else
                                pr_err("JFS: discard option not supported on device\n");
                        break;
-               }
 
                case Opt_nodiscard:
                        *flag &= ~JFS_DISCARD;
 
                case Opt_discard_minblk:
                {
-                       struct request_queue *q = bdev_get_queue(sb->s_bdev);
                        char *minblks_trim = args[0].from;
                        int rc;
-                       if (blk_queue_discard(q)) {
+                       if (bdev_max_discard_sectors(sb->s_bdev)) {
                                *flag |= JFS_DISCARD;
                                rc = kstrtouint(minblks_trim, 0,
                                                &sbi->minblks_trim);
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(nilfs->ns_bdev))
                return -EOPNOTSUPP;
 
        if (copy_from_user(&range, argp, sizeof(range)))
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(sbi->sb->s_bdev))
                return -EOPNOTSUPP;
 
        user_range = (struct fstrim_range __user *)arg;
 
        }
 
        rq = bdev_get_queue(bdev);
-       if (blk_queue_discard(rq) && rq->limits.discard_granularity) {
+       if (bdev_max_discard_sectors(bdev) && rq->limits.discard_granularity) {
                sbi->discard_granularity = rq->limits.discard_granularity;
                sbi->discard_granularity_mask_inv =
                        ~(u64)(sbi->discard_granularity - 1);
 
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
 
-               if (!blk_queue_discard(q))
+               if (!bdev_max_discard_sectors(sb->s_bdev))
                        return -EOPNOTSUPP;
 
                if (copy_from_user(&range, argp, sizeof(range)))
 
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       if (!blk_queue_discard(q))
+       if (!bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev))
                return -EOPNOTSUPP;
 
        /*
 
                        goto out_filestream_unmount;
        }
 
-       if (xfs_has_discard(mp)) {
-               struct request_queue *q = bdev_get_queue(sb->s_bdev);
-
-               if (!blk_queue_discard(q)) {
-                       xfs_warn(mp, "mounting with \"discard\" option, but "
-                                       "the device does not support discard");
-                       mp->m_features &= ~XFS_FEAT_DISCARD;
-               }
+       if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
+               xfs_warn(mp,
+       "mounting with \"discard\" option, but the device does not support discard");
+               mp->m_features &= ~XFS_FEAT_DISCARD;
        }
 
        if (xfs_has_reflink(mp)) {
 
 #define QUEUE_FLAG_NONROT      6       /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT                QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     7       /* do disk/partitions IO accounting */
-#define QUEUE_FLAG_DISCARD     8       /* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   9       /* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  10      /* Contributes to random pool */
 #define QUEUE_FLAG_SECERASE    11      /* supports secure erase */
        test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
 #define blk_queue_io_stat(q)   test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)        test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_discard(q)   test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 #define blk_queue_zone_resetall(q)     \
        test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
 #define blk_queue_secure_erase(q) \
 
        return nr_extents;
 }
 
-/*
- * Helper to sys_swapon determining if a given swap
- * backing device queue supports DISCARD operations.
- */
-static bool swap_discardable(struct swap_info_struct *si)
-{
-       struct request_queue *q = bdev_get_queue(si->bdev);
-
-       if (!blk_queue_discard(q))
-               return false;
-
-       return true;
-}
-
 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 {
        struct swap_info_struct *p;
                                         sizeof(long),
                                         GFP_KERNEL);
 
-       if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
+       if ((swap_flags & SWAP_FLAG_DISCARD) &&
+           p->bdev && bdev_max_discard_sectors(p->bdev)) {
                /*
                 * When discard is enabled for swap with no particular
                 * policy flagged, we set all swap discard flags here in