static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
                               struct drbd_backing_dev *nbc)
 {
-       struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
+       struct block_device *bdev = nbc->backing_bdev;
+       struct request_queue *q = bdev->bd_disk->queue;
 
        if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
                disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
 
        if (disk_conf->rs_discard_granularity) {
                int orig_value = disk_conf->rs_discard_granularity;
+               sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
                int remainder;
 
                if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
                        disk_conf->rs_discard_granularity = q->limits.discard_granularity;

                remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
                disk_conf->rs_discard_granularity += remainder;
 
-               if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
-                       disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;
+               if (disk_conf->rs_discard_granularity > discard_size)
+                       disk_conf->rs_discard_granularity = discard_size;
 
                if (disk_conf->rs_discard_granularity != orig_value)
                        drbd_info(device, "rs_discard_granularity changed to %d\n",
                                  disk_conf->rs_discard_granularity);
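For reference, bdev_max_discard_sectors() counts 512-byte sectors, which is why the new discard_size is shifted left by 9 to get bytes before the clamp. A minimal standalone sketch of the same arithmetic, with invented limits:

#include <stdio.h>

int main(void)
{
        /* invented limits: a 16384-sector (8 MiB) discard cap */
        unsigned int max_discard_sectors = 16384;
        unsigned long long discard_size =
                (unsigned long long)max_discard_sectors << 9;   /* bytes */
        unsigned int rs_discard_granularity = 16U << 20;        /* 16 MiB ask */

        if (rs_discard_granularity > discard_size)
                rs_discard_granularity = (unsigned int)discard_size;

        printf("clamped to %u bytes\n", rs_discard_granularity);
        return 0;
}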
 
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
-       max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22));
+       max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22));
        max_discard_sectors -= max_discard_sectors % granularity;
        if (unlikely(!max_discard_sectors))
                goto zero_out;
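The fragment above trims the device limit down to a whole number of discard granules and falls back to zeroing when nothing remains. The same rounding in a standalone sketch, with invented limits:

#include <stdio.h>

int main(void)
{
        /* invented limits: 1 MiB granularity, device cap just under 2 GiB */
        unsigned int granularity = 2048;        /* 512-byte sectors */
        unsigned int max_discard_sectors = (1U << 22) - 1;

        /* round the device limit down to whole discard granules */
        max_discard_sectors -= max_discard_sectors % granularity;

        if (!max_discard_sectors)
                printf("no usable limit, fall back to zeroing\n");
        else
                printf("usable limit: %u sectors\n", max_discard_sectors);
        return 0;
}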
 
        if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
                return 0;
 
-       return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),
-                                        REQ_OP_DISCARD);
+       return bdev_max_discard_sectors(dev->bdev);
 }
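Since max_discard_sectors defaults to 0 for queues without discard support, bdev_max_discard_sectors() already returns 0 for such devices, making the blk_queue_discard() test kept above redundant. A sketch of the further-reduced helper a follow-up cleanup could adopt (not part of this change):

/* Sketch only: relies on bdev_max_discard_sectors() returning 0 when
 * the backing queue never set a discard limit, so no separate
 * blk_queue_discard() check is needed. */
static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
{
        return bdev_max_discard_sectors(dev->bdev);
}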
 
 static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
 
        /*
         * Reject unsupported discard and write same requests.
         */
        if (op == REQ_OP_DISCARD)
-               special_cmd_max_sectors = q->limits.max_discard_sectors;
+               special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
        else if (op == REQ_OP_WRITE_ZEROES)
                special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
        if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
 
 }
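A limit of 0 is how the block layer encodes "unsupported", which is what the special_cmd_max_sectors test above relies on. A minimal sketch of that rejection rule, with invented names:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the check above: a special request (discard or
 * write-zeroes) against a zero limit cannot be issued and is failed. */
static bool must_reject(bool special_op, unsigned int special_cmd_max_sectors)
{
        return special_op && special_cmd_max_sectors == 0;
}

int main(void)
{
        printf("%d\n", must_reject(true, 0));           /* 1: reject */
        printf("%d\n", must_reject(true, 8192));        /* 0: issue it */
        return 0;
}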
 
 /*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
+ * Check if the underlying struct block_device supports discard and if yes
+ * configure the UNMAP parameters.
  */
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
                                       struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);
        int block_size = bdev_logical_block_size(bdev);

        if (!blk_queue_discard(q))
                return false;
 
        attrib->max_unmap_lba_count =
-               q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
+               bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
        /*
         * Currently hardcoded to 1 in Linux/SCSI code..
         */
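The shift by ilog2(block_size) - 9 converts the 512-byte-sector limit into logical blocks. A standalone illustration with an assumed 4096-byte block size and 16384-sector limit:

#include <stdio.h>

int main(void)
{
        /* assumed device: 4096-byte logical blocks, 16384-sector limit */
        unsigned int block_size = 4096;
        unsigned int max_discard_sectors = 16384;
        unsigned int shift = 0;

        /* open-coded ilog2(block_size) */
        while ((1U << (shift + 1)) <= block_size)
                shift++;

        /* sectors are 512 bytes, so shift right by ilog2(bs) - 9 */
        printf("max_unmap_lba_count = %u\n", max_discard_sectors >> (shift - 9));
        return 0;
}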
 
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
                                                struct discard_policy *dpolicy,
                                                struct discard_cmd *dc,
                                                unsigned int *issued)
 {
        struct block_device *bdev = dc->bdev;
-       struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_discard_blocks =
-                       SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
+                       SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
        struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
                                        &(dcc->fstrim_list) : &(dcc->wait_list);

        struct discard_cmd *dc;
        struct discard_info di = {0};
        struct rb_node **insert_p = NULL, *insert_parent = NULL;
-       struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_discard_blocks =
-                       SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
+                       SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
        block_t end = lstart + len;
 
        dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
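SECTOR_TO_BLOCK() maps the sector-based limit onto f2fs blocks; with the default 4 KiB block it is a right shift by 3. A small sketch with an assumed device limit:

#include <stdio.h>

/* f2fs blocks are 4 KiB by default: 8 sectors of 512 bytes, log2 = 3 */
#define SECTORS_PER_BLOCK_SHIFT 3
#define SECTOR_TO_BLOCK(sectors) ((sectors) >> SECTORS_PER_BLOCK_SHIFT)

int main(void)
{
        unsigned int max_discard_sectors = 65536;       /* assumed limit */

        printf("max_discard_blocks = %u\n", SECTOR_TO_BLOCK(max_discard_sectors));
        return 0;
}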
 
 int bdev_alignment_offset(struct block_device *bdev);
 unsigned int bdev_discard_alignment(struct block_device *bdev);
 
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
+{
+       return bdev_get_queue(bdev)->limits.max_discard_sectors;
+}
+
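A minimal sketch of how a caller would use the new helper, assuming kernel context (linux/blkdev.h); the function name is invented, and a return of 0 still means the device has no discard support:

/* Hypothetical caller: probe for discard support via the helper
 * instead of reaching into bdev_get_queue(bdev)->limits directly. */
static bool example_supports_discard(struct block_device *bdev)
{
        return bdev_max_discard_sectors(bdev) != 0;
}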
 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 {
        struct request_queue *q = bdev_get_queue(bdev);