REQ_OP_NAME(WRITE),
REQ_OP_NAME(FLUSH),
REQ_OP_NAME(DISCARD),
- REQ_OP_NAME(SECURE_ERASE),
REQ_OP_NAME(ZONE_RESET),
REQ_OP_NAME(ZONE_RESET_ALL),
REQ_OP_NAME(ZONE_OPEN),
if (!blk_queue_discard(q))
goto not_supported;
break;
- case REQ_OP_SECURE_ERASE:
- if (!blk_queue_secure_erase(q))
- goto not_supported;
- break;
case REQ_OP_WRITE_SAME:
if (!q->limits.max_write_same_sectors)
goto not_supported;
{
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
- unsigned int op;
sector_t bs_mask, part_offset = 0;
if (!q)
return -ENXIO;
if (bdev_read_only(bdev))
return -EPERM;
- if (flags & BLKDEV_DISCARD_SECURE) {
- if (!blk_queue_secure_erase(q))
- return -EOPNOTSUPP;
- op = REQ_OP_SECURE_ERASE;
- } else {
- if (!blk_queue_discard(q))
- return -EOPNOTSUPP;
- op = REQ_OP_DISCARD;
- }
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
/* In case the discard granularity isn't set by buggy device driver */
if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
- bio_set_op_attrs(bio, op, 0);
+ bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_size = req_sects << 9;
sector += req_sects;
switch (bio_op(*bio)) {
case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
break;
case REQ_OP_WRITE_ZEROES:
switch (bio_op(rq->bio)) {
case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
if (queue_max_discard_segments(rq->q) > 1) {
struct bio *bio = rq->bio;
if (blk_rq_is_passthrough(rq))
return q->limits.max_hw_sectors;
- if (!q->limits.chunk_sectors ||
- req_op(rq) == REQ_OP_DISCARD ||
- req_op(rq) == REQ_OP_SECURE_ERASE)
+ if (!q->limits.chunk_sectors || req_op(rq) == REQ_OP_DISCARD)
return blk_queue_get_max_sectors(q, req_op(rq));
return min(blk_max_size_offset(q, offset, 0),
QUEUE_FLAG_NAME(DISCARD),
QUEUE_FLAG_NAME(NOXMERGES),
QUEUE_FLAG_NAME(ADD_RANDOM),
- QUEUE_FLAG_NAME(SECERASE),
QUEUE_FLAG_NAME(SAME_FORCE),
QUEUE_FLAG_NAME(DEAD),
QUEUE_FLAG_NAME(INIT_DONE),
{
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
return true; /* non-trivial splitting decisions */
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
break;
case REQ_OP_WRITE_SAME:
return blkdev_roset(bdev, mode, cmd, arg);
case BLKDISCARD:
return blk_ioctl_discard(bdev, mode, arg, 0);
- case BLKSECDISCARD:
- return blk_ioctl_discard(bdev, mode, arg,
- BLKDEV_DISCARD_SECURE);
case BLKZEROOUT:
return blk_ioctl_zeroout(bdev, mode, arg);
case BLKGETDISKSEQ:
dev->queue->limits.discard_alignment = dev->discard_alignment;
if (dev->max_discard_sectors)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
- if (dev->secure_discard)
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
case RNBD_OP_DISCARD:
bio_opf = REQ_OP_DISCARD;
break;
- case RNBD_OP_SECURE_ERASE:
- bio_opf = REQ_OP_SECURE_ERASE;
- break;
case RNBD_OP_WRITE_SAME:
bio_opf = REQ_OP_WRITE_SAME;
break;
case REQ_OP_DISCARD:
rnbd_opf = RNBD_OP_DISCARD;
break;
- case REQ_OP_SECURE_ERASE:
- rnbd_opf = RNBD_OP_SECURE_ERASE;
- break;
case REQ_OP_WRITE_SAME:
rnbd_opf = RNBD_OP_WRITE_SAME;
break;
return queue_max_hw_sectors(bdev_get_queue(dev->bdev));
}
-static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
-{
- return blk_queue_secure_erase(bdev_get_queue(dev->bdev));
-}
-
static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
{
if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
cpu_to_le32(rnbd_dev_get_discard_granularity(rnbd_dev));
rsp->discard_alignment =
cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
- rsp->secure_discard =
- cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
+ rsp->secure_discard = cpu_to_le16(0);
rsp->rotational = !blk_queue_nonrot(q);
rsp->cache_policy = 0;
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
}
ring->st_ds_req++;
- secure = (blkif->vbd.discard_secure &&
- (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
- BLKDEV_DISCARD_SECURE : 0;
-
err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
req->u.discard.nr_sectors,
- GFP_KERNEL, secure);
+ GFP_KERNEL, 0);
fail_response:
if (err == -EOPNOTSUPP) {
pr_debug("discard op failed, not supported\n");
/* Cached size parameter. */
sector_t size;
unsigned int flush_support:1;
- unsigned int discard_secure:1;
unsigned int feature_gnt_persistent:1;
unsigned int overflow_max_grants:1;
};
if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
vbd->flush_support = true;
- if (q && blk_queue_secure_erase(q))
- vbd->discard_secure = true;
-
vbd->feature_gnt_persistent = feature_persistent;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
state = 1;
/* Optional. */
err = xenbus_printf(xbt, dev->nodename,
- "discard-secure", "%d",
- blkif->vbd.discard_secure);
+ "discard-secure", "%d", 0);
if (err) {
dev_warn(&dev->dev, "writing discard-secure (%d)", err);
return;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
ring_req->u.discard.id = id;
ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
- if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
- ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
- else
- ring_req->u.discard.flag = 0;
+ ring_req->u.discard.flag = 0;
/* Copy the request to the ring page. */
*final_ring_req = *ring_req;
if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
return 1;
- if (unlikely(req_op(req) == REQ_OP_DISCARD ||
- req_op(req) == REQ_OP_SECURE_ERASE))
+ if (unlikely(req_op(req) == REQ_OP_DISCARD))
return blkif_queue_discard_req(req, rinfo);
- else
- return blkif_queue_rw_req(req, rinfo);
+ return blkif_queue_rw_req(req, rinfo);
}
static inline void flush_requests(struct blkfront_ring_info *rinfo)
rq->limits.discard_granularity = info->discard_granularity ?:
info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
- if (info->feature_secdiscard)
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
info->feature_discard = 0;
info->feature_secdiscard = 0;
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
- blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
}
break;
case BLKIF_OP_FLUSH_DISKCACHE:
*/
if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
req_op(shadow[j].request) == REQ_OP_DISCARD ||
- req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
shadow[j].request->cmd_flags & REQ_FUA) {
/*
* Flush operations don't contain bios, so
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
- ti->num_secure_erase_bios = 0;
ti->num_write_same_bios = 0;
ti->num_write_zeroes_bios = 0;
return 0;
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
- ti->num_secure_erase_bios = 1;
ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
ti->private = lc;
ti->num_flush_bios = stripes;
ti->num_discard_bios = stripes;
- ti->num_secure_erase_bios = stripes;
ti->num_write_same_bios = stripes;
ti->num_write_zeroes_bios = stripes;
return DM_MAPIO_REMAPPED;
}
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
- unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
return true;
}
-static int device_not_secure_erase_capable(struct dm_target *ti,
- struct dm_dev *dev, sector_t start,
- sector_t len, void *data)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_secure_erase(q);
-}
-
-static bool dm_table_supports_secure_erase(struct dm_table *t)
-{
- struct dm_target *ti;
- unsigned int i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (!ti->num_secure_erase_bios)
- return false;
-
- if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
- return false;
- }
-
- return true;
-}
-
static int device_requires_stable_pages(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
} else
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- if (dm_table_supports_secure_erase(t))
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
-
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE_ZEROES:
r = true;
case REQ_OP_DISCARD:
num_bios = ti->num_discard_bios;
break;
- case REQ_OP_SECURE_ERASE:
- num_bios = ti->num_secure_erase_bios;
- break;
case REQ_OP_WRITE_SAME:
num_bios = ti->num_write_same_bios;
break;
blk_mq_end_request(req, status);
}
-static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
- struct request *req)
-{
- struct mmc_blk_data *md = mq->blkdata;
- struct mmc_card *card = md->queue.card;
- unsigned int from, nr, arg;
- int err = 0, type = MMC_BLK_SECDISCARD;
- blk_status_t status = BLK_STS_OK;
-
- if (!(mmc_can_secure_erase_trim(card))) {
- status = BLK_STS_NOTSUPP;
- goto out;
- }
-
- from = blk_rq_pos(req);
- nr = blk_rq_sectors(req);
-
- if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
- arg = MMC_SECURE_TRIM1_ARG;
- else
- arg = MMC_SECURE_ERASE_ARG;
-
-retry:
- if (card->quirks & MMC_QUIRK_INAND_CMD38) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- INAND_CMD38_ARG_EXT_CSD,
- arg == MMC_SECURE_TRIM1_ARG ?
- INAND_CMD38_ARG_SECTRIM1 :
- INAND_CMD38_ARG_SECERASE,
- card->ext_csd.generic_cmd6_time);
- if (err)
- goto out_retry;
- }
-
- err = mmc_erase(card, from, nr, arg);
- if (err == -EIO)
- goto out_retry;
- if (err) {
- status = BLK_STS_IOERR;
- goto out;
- }
-
- if (arg == MMC_SECURE_TRIM1_ARG) {
- if (card->quirks & MMC_QUIRK_INAND_CMD38) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- INAND_CMD38_ARG_EXT_CSD,
- INAND_CMD38_ARG_SECTRIM2,
- card->ext_csd.generic_cmd6_time);
- if (err)
- goto out_retry;
- }
-
- err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
- if (err == -EIO)
- goto out_retry;
- if (err) {
- status = BLK_STS_IOERR;
- goto out;
- }
- }
-
-out_retry:
- if (err && !mmc_blk_reset(md, card->host, type))
- goto retry;
- if (!err)
- mmc_blk_reset_success(md, type);
-out:
- blk_mq_end_request(req, status);
-}
-
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
case REQ_OP_DISCARD:
mmc_blk_issue_discard_rq(mq, req);
break;
- case REQ_OP_SECURE_ERASE:
- mmc_blk_issue_secdiscard_rq(mq, req);
- break;
case REQ_OP_FLUSH:
mmc_blk_issue_flush(mq, req);
break;
return 0;
}
-int mmc_can_secure_erase_trim(struct mmc_card *card)
-{
- if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
- !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
- return 1;
- return 0;
-}
-EXPORT_SYMBOL(mmc_can_secure_erase_trim);
-
int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
unsigned int nr)
{
int mmc_can_trim(struct mmc_card *card);
int mmc_can_discard(struct mmc_card *card);
int mmc_can_sanitize(struct mmc_card *card);
-int mmc_can_secure_erase_trim(struct mmc_card *card);
int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
unsigned int nr);
unsigned int mmc_calc_max_discard(struct mmc_card *card);
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
return MMC_ISSUE_SYNC;
case REQ_OP_FLUSH:
return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
/* granularity must not be greater than max. discard */
if (card->pref_erase > max_discard)
q->limits.discard_granularity = SECTOR_SIZE;
- if (mmc_can_secure_erase_trim(card))
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}
static unsigned short mmc_get_max_segments(struct mmc_host *host)
return -ENXIO;
if (flags & F2FS_TRIM_FILE_DISCARD)
- ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
- blk_queue_secure_erase(q) ?
- BLKDEV_DISCARD_SECURE : 0);
+ ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, 0);
if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
if (IS_ENCRYPTED(inode))
if (bio &&
bio->bi_iter.bi_size &&
bio_op(bio) != REQ_OP_DISCARD &&
- bio_op(bio) != REQ_OP_SECURE_ERASE &&
bio_op(bio) != REQ_OP_WRITE_ZEROES)
return true;
static inline bool bio_no_advance_iter(const struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
- bio_op(bio) == REQ_OP_SECURE_ERASE ||
bio_op(bio) == REQ_OP_WRITE_SAME ||
bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
return 0;
case REQ_OP_WRITE_SAME:
REQ_OP_FLUSH = 2,
/* discard sectors */
REQ_OP_DISCARD = 3,
- /* securely erase sectors */
- REQ_OP_SECURE_ERASE = 5,
/* write the same sector many times */
REQ_OP_WRITE_SAME = 7,
/* write the zero filled sector many times */
#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q) \
test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
-#define blk_queue_secure_erase(q) \
- (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q) \
test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
- if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
+ if (unlikely(op == REQ_OP_DISCARD))
return min(q->limits.max_discard_sectors,
UINT_MAX >> SECTOR_SHIFT);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
-#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
-
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*/
unsigned num_discard_bios;
- /*
- * The number of secure erase bios that will be submitted to the target.
- * The bio number can be accessed with dm_bio_get_target_bio_nr.
- */
- unsigned num_secure_erase_bios;
-
/*
* The number of WRITE SAME bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
what |= MASK_TC_BIT(op_flags, META);
what |= MASK_TC_BIT(op_flags, PREFLUSH);
what |= MASK_TC_BIT(op_flags, FUA);
- if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
+ if (op == REQ_OP_DISCARD)
what |= BLK_TC_ACT(BLK_TC_DISCARD);
if (op == REQ_OP_FLUSH)
what |= BLK_TC_ACT(BLK_TC_FLUSH);
case REQ_OP_DISCARD:
rwbs[i++] = 'D';
break;
- case REQ_OP_SECURE_ERASE:
- rwbs[i++] = 'D';
- rwbs[i++] = 'E';
- break;
case REQ_OP_FLUSH:
rwbs[i++] = 'F';
break;