diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(QUIESCED),
QUEUE_FLAG_NAME(PCI_P2PDMA),
- QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(HCTX_ACTIVE),
QUEUE_FLAG_NAME(SQ_SCHED),
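
The entry can be dropped here only because the flag itself is deleted from blkdev.h further down; QUEUE_FLAG_NAME builds a name table indexed by bit number that debugfs uses to decode q->queue_flags. A simplified sketch of the mechanism in block/blk-mq-debugfs.c, trimmed to the entries shown above:

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	/* ZONE_RESETALL is gone: there is no longer a queue-flag bit to decode */
};
#undef QUEUE_FLAG_NAME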
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
sector += dev->zone_size_sects;
}
- lim->features |= BLK_FEAT_ZONED;
+ lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
lim->chunk_sectors = dev->zone_size_sects;
lim->max_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
@@ ... @@ null_register_zoned_dev()
struct request_queue *q = nullb->q;
struct gendisk *disk = nullb->disk;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
disk->nr_zones = bdev_nr_zones(disk->part0);
pr_info("%s: using %s zone append\n",
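
This null_blk pair shows the conversion pattern that every driver hunk below repeats: the capability is declared in the queue_limits the driver fills in before disk allocation, instead of being toggled with blk_queue_flag_set() on an already-created queue. A minimal sketch of the new-style setup for a hypothetical blk-mq driver (the mydrv_* names are illustrative, not from the patch):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static struct gendisk *mydrv_alloc_disk(struct blk_mq_tag_set *set,
					sector_t zone_sectors)
{
	struct queue_limits lim = {
		/* declared up front; no post-allocation flag twiddling */
		.features	= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL,
		.chunk_sectors	= zone_sectors,	/* zone size in sectors */
	};

	/* the limits, feature bits included, apply from allocation onward */
	return blk_mq_alloc_disk(set, &lim, NULL);
}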
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
-
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
}
@@ ... @@ ublk_ctrl_start_dev()
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
return -EOPNOTSUPP;
- lim.features |= BLK_FEAT_ZONED;
+ lim.features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
lim.max_active_zones = p->max_active_zones;
lim.max_open_zones = p->max_open_zones;
lim.max_zone_append_sectors = p->max_zone_append_sectors;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
- lim->features |= BLK_FEAT_ZONED;
+ lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
@@ ... @@ virtblk_probe()
*/
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
(lim.features & BLK_FEAT_ZONED)) {
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
err = blk_revalidate_disk_zones(vblk->disk);
if (err)
goto out_cleanup_disk;
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
struct nvme_zone_info *zi)
{
- lim->features |= BLK_FEAT_ZONED;
+ lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
lim->max_open_zones = zi->max_open_zones;
lim->max_active_zones = zi->max_active_zones;
lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
lim->chunk_sectors = ns->head->zsze =
nvme_lba_to_sect(ns->head, zi->zone_size);
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
}
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
u8 buf[SD_BUF_SIZE])
{
- struct gendisk *disk = sdkp->disk;
- struct request_queue *q = disk->queue;
unsigned int nr_zones;
u32 zone_blocks = 0;
int ret;
if (sdkp->device->type != TYPE_ZBC)
return 0;
- lim->features |= BLK_FEAT_ZONED;
+ lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
/*
* Per ZBC and ZAC specifications, writes in sequential write required
sdkp->early_zone_info.zone_blocks = zone_blocks;
/* The drive satisfies the kernel restrictions: set it up */
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
if (sdkp->zones_max_open == U32_MAX)
lim->max_open_zones = 0;
else
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
/* is a zoned device */
BLK_FEAT_ZONED = (1u << 10),
+
+ /* supports Zone Reset All */
+ BLK_FEAT_ZONE_RESETALL = (1u << 11),
};
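
Because the bit now lives in queue_limits, changing it on a live queue goes through the limits-update API rather than an atomic flag helper. A hedged sketch (mydrv_enable_reset_all is hypothetical; queue_limits_start_update() and queue_limits_commit_update() are the stock helpers):

static int mydrv_enable_reset_all(struct request_queue *q)
{
	struct queue_limits lim;

	/* snapshot the current limits under q->limits_lock */
	lim = queue_limits_start_update(q);
	lim.features |= BLK_FEAT_ZONE_RESETALL;
	/* validate and publish the updated limits atomically */
	return queue_limits_commit_update(q, &lim);
}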
@@ ... @@
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
-#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
@@ ... @@
#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_zone_resetall(q) \
- test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
+ ((q)->limits.features & BLK_FEAT_ZONE_RESETALL)
#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q) \
test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
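
Callers are unaffected by the storage change, since blk_queue_zone_resetall() keeps its name and truth value. As a usage illustration, here is a hedged sketch of the kind of decision it gates, modeled loosely on the whole-device reset path in block/blk-zoned.c (mydev_reset_all_zones is hypothetical, and it assumes every zone is sequential and resettable):

#include <linux/bio.h>
#include <linux/blkdev.h>

static int mydev_reset_all_zones(struct block_device *bdev)
{
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	sector_t sector;
	int ret;

	if (blk_queue_zone_resetall(bdev_get_queue(bdev))) {
		/* one command resets every zone on the device */
		struct bio bio;

		bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
		ret = submit_bio_wait(&bio);
		bio_uninit(&bio);
		return ret;
	}

	/* fallback: issue one REQ_OP_ZONE_RESET bio per zone */
	for (sector = 0; sector < capacity; sector += zone_sectors) {
		struct bio bio;

		bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET | REQ_SYNC);
		bio.bi_iter.bi_sector = sector;
		ret = submit_bio_wait(&bio);
		bio_uninit(&bio);
		if (ret)
			return ret;
	}
	return 0;
}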