 */
struct bio *bio_split_to_limits(struct bio *bio)
{
-	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
	unsigned int nr_segs;

-	return __bio_split_to_limits(bio, lim, &nr_segs);
+	return __bio_split_to_limits(bio, bdev_limits(bio->bi_bdev), &nr_segs);
}
EXPORT_SYMBOL(bio_split_to_limits);
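
For context, bio_split_to_limits() is typically called at the top of a
bio-based driver's ->submit_bio handler. A minimal sketch, assuming a
hypothetical driver (example_submit_bio and the trailing processing are
illustrative, not part of this patch):

static void example_submit_bio(struct bio *bio)
{
	/*
	 * Split against the underlying queue limits; a NULL return means
	 * the bio was already handled and must not be touched further.
	 */
	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	/* ... process the (possibly shortened) bio ... */
	bio_endio(bio);
}
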
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
-	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
+	if (blk_stack_limits(t, bdev_limits(bdev),
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
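
queue_limits_stack_bdev() is the stacking-driver entry point this hunk
converts: it folds the bottom device's limits into the caller's
queue_limits and logs a notice when the result is misaligned. A minimal
usage sketch, assuming a hypothetical stacking driver (the function name
and the "example" prefix are illustrative):

static void example_stack_limits(struct queue_limits *lim,
				 struct block_device *lower_bdev)
{
	/*
	 * Start from permissive stacking defaults, then pull in the
	 * lower device's limits at offset 0.
	 */
	blk_set_stacking_limits(lim);
	queue_limits_stack_bdev(lim, lower_bdev, 0, "example");
}
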
static void disable_passdown_if_not_supported(struct cache *cache)
{
	struct block_device *origin_bdev = cache->origin_dev->bdev;
-	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+	struct queue_limits *origin_limits = bdev_limits(origin_bdev);
	const char *reason = NULL;

	if (!cache->features.discard_passdown)

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	struct block_device *origin_bdev = cache->origin_dev->bdev;
-	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+	struct queue_limits *origin_limits = bdev_limits(origin_bdev);

	if (!cache->features.discard_passdown) {
		/* No passdown is done so setting own virtual limits */

static void disable_passdown_if_not_supported(struct clone *clone)
{
	struct block_device *dest_dev = clone->dest_dev->bdev;
-	struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
+	struct queue_limits *dest_limits = bdev_limits(dest_dev);
	const char *reason = NULL;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))

static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
	struct block_device *dest_bdev = clone->dest_dev->bdev;
-	struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;
+	struct queue_limits *dest_limits = bdev_limits(dest_bdev);

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
		/* No passdown is done so we set our own virtual limits */

{
	struct pool *pool = pt->pool;
	struct block_device *data_bdev = pt->data_dev->bdev;
-	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+	struct queue_limits *data_limits = bdev_limits(data_bdev);
	const char *reason = NULL;

	if (!pt->adjusted_pf.discard_passdown)
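
The three dm hunks above share the same shape: fetch the bottom device's
limits, then disable discard passdown when the device cannot honor it. A
condensed sketch of that check, assuming a hypothetical helper name (the
real targets also record a reason string for logging):

static bool example_discard_passdown_supported(struct block_device *bdev)
{
	/* A device advertising no discard capacity cannot take passdown. */
	return bdev_limits(bdev)->max_discard_sectors != 0;
}
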
		 * zoned mode. In this case, we don't have a valid max zone
		 * append size.
		 */
-		if (bdev_is_zoned(device->bdev)) {
-			blk_stack_limits(lim,
-					 &bdev_get_queue(device->bdev)->limits,
-					 0);
-		}
+		if (bdev_is_zoned(device->bdev))
+			blk_stack_limits(lim, bdev_limits(device->bdev), 0);
	}

	/*

 */
#define BLK_DEF_MAX_SECTORS_CAP	2560u

+static inline struct queue_limits *bdev_limits(struct block_device *bdev)
+{
+	return &bdev_get_queue(bdev)->limits;
+}
+
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
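
The new helper simply names the open-coded pattern the callers above were
repeating. A sketch of the equivalence, assuming a hypothetical function
name for illustration:

static inline bool example_same_limits(struct block_device *bdev)
{
	/* Before: reach through the request_queue by hand. */
	struct queue_limits *old_way = &bdev_get_queue(bdev)->limits;
	/* After: the same pointer, via the helper. */
	struct queue_limits *new_way = bdev_limits(bdev);

	return old_way == new_way;	/* always true */
}
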
static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
-	return bdev_get_queue(bdev)->limits.max_discard_sectors;
+	return bdev_limits(bdev)->max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
-	return bdev_get_queue(bdev)->limits.discard_granularity;
+	return bdev_limits(bdev)->discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
-	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
+	return bdev_limits(bdev)->max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
-	return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors;
+	return bdev_limits(bdev)->max_write_zeroes_sectors;
}

static inline bool bdev_nonrot(struct block_device *bdev)

static inline bool bdev_fua(struct block_device *bdev)
{
-	return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA;
+	return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}

static inline bool bdev_nowait(struct block_device *bdev)
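
The converted accessors read their fields straight out of the limits. An
illustrative caller, assuming a hypothetical helper that picks write flags
based on the converted bdev_fua():

static blk_opf_t example_write_opf(struct block_device *bdev)
{
	blk_opf_t opf = REQ_OP_WRITE;

	/*
	 * Only request a forced-unit-access write when the device
	 * actually supports FUA.
	 */
	if (bdev_fua(bdev))
		opf |= REQ_FUA;
	return opf;
}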