if (lim->alignment_offset) {
lim->alignment_offset &= (lim->physical_block_size - 1);
- lim->misaligned = 0;
+ lim->flags &= ~BLK_FEAT_MISALIGNED;
}
if (!(lim->features & BLK_FEAT_WRITE_CACHE))
lim->features &= ~BLK_FEAT_FUA;
if (!(b->features & BLK_FEAT_POLL))
t->features &= ~BLK_FEAT_POLL;
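+ /* inherit the misaligned flag from the bottom device being stacked */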
+ t->flags |= (b->flags & BLK_FEAT_MISALIGNED);
+
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_user_sectors = min_not_zero(t->max_user_sectors,
b->max_user_sectors);
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);
- t->misaligned |= b->misaligned;
-
alignment = queue_limit_alignment_offset(b, start);
/* Bottom device has different alignment. Check that it is compatible with the current top alignment. */
/* Verify that top and bottom intervals line up */
if (max(top, bottom) % min(top, bottom)) {
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
}
/* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) {
t->physical_block_size = t->logical_block_size;
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
/* Minimum I/O a multiple of the physical block size? */
if (t->io_min & (t->physical_block_size - 1)) {
t->io_min = t->physical_block_size;
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
/* Optimal I/O a multiple of the physical block size? */
if (t->io_opt & (t->physical_block_size - 1)) {
t->io_opt = 0;
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
/* chunk_sectors a multiple of the physical block size? */
if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
t->chunk_sectors = 0;
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
/* Verify that new alignment_offset is on a logical block boundary */
if (t->alignment_offset & (t->logical_block_size - 1)) {
- t->misaligned = 1;
+ t->flags |= BLK_FEAT_MISALIGNED;
ret = -1;
}
int bdev_alignment_offset(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (q->limits.misaligned)
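+ /* a misaligned topology has no well-defined alignment offset to report */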
+ if (q->limits.flags & BLK_FEAT_MISALIGNED)
return -1;
if (bdev_is_partition(bdev))
return queue_limit_alignment_offset(&q->limits,
bdev->bd_start_sect);
enum {
/* do not send FLUSH/FUA commands despite advertising a write cache */
BLK_FLAG_WRITE_CACHE_DISABLED = (1u << 0),
+
+ /* I/O topology is misaligned */
+ BLK_FEAT_MISALIGNED = (1u << 1),
};
struct queue_limits {
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
- unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
unsigned int max_open_zones;