static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
- if (q->limits.features & BLK_FLAGS_WRITE_CACHE_DISABLED)
+ if (q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED)
return sprintf(page, "write through\n");
return sprintf(page, "write back\n");
}
lim = queue_limits_start_update(q);
if (disable)
- lim.flags |= BLK_FLAGS_WRITE_CACHE_DISABLED;
+ lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
else
- lim.flags &= ~BLK_FLAGS_WRITE_CACHE_DISABLED;
+ lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
err = queue_limits_commit_update(q, &lim);
if (err)
return err;
/* internal flags in queue_limits.flags */
enum {
- /* do not send FLUSH or FUA command despite advertised write cache */
- BLK_FLAGS_WRITE_CACHE_DISABLED = (1u << 31),
+ /* do not send FLUSH/FUA commands despite advertising a write cache */
+ BLK_FLAG_WRITE_CACHE_DISABLED = (1u << 0),
};
struct queue_limits {
static inline bool blk_queue_write_cache(struct request_queue *q)
{
return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
- !(q->limits.flags & BLK_FLAGS_WRITE_CACHE_DISABLED);
+ !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}
static inline bool bdev_write_cache(struct block_device *bdev)