/*
* Copyright (C) 2025 Christoph Hellwig
*/
+#include <linux/blk-integrity.h>
#include <linux/blk-mq-dma.h>
#include "blk.h"
u32 len;
};
+/*
+ * Advance @iter to the next bio in the request chain once the current
+ * bvec iterator is exhausted.  Depending on @iter->is_integrity the
+ * iterator is reloaded from the next bio's integrity payload (bip_iter /
+ * bip_vec) or its data payload (bi_iter / bi_io_vec).
+ *
+ * Returns %false when the current bio is exhausted and no further bio is
+ * chained, %true when there is more data to iterate over.
+ */
+static bool __blk_map_iter_next(struct blk_map_iter *iter)
+{
+ if (iter->iter.bi_size)
+ return true;
+ if (!iter->bio || !iter->bio->bi_next)
+ return false;
+
+ iter->bio = iter->bio->bi_next;
+ if (iter->is_integrity) {
+ iter->iter = bio_integrity(iter->bio)->bip_iter;
+ iter->bvecs = bio_integrity(iter->bio)->bip_vec;
+ } else {
+ iter->iter = iter->bio->bi_iter;
+ iter->bvecs = iter->bio->bi_io_vec;
+ }
+ return true;
+}
+
static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter,
struct phys_vec *vec)
{
while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
struct bio_vec next;
- if (!iter->iter.bi_size) {
- if (!iter->bio || !iter->bio->bi_next)
- break;
- iter->bio = iter->bio->bi_next;
- iter->iter = iter->bio->bi_iter;
- iter->bvecs = iter->bio->bi_io_vec;
- }
+ if (!__blk_map_iter_next(iter))
+ break;
next = mp_bvec_iter_bvec(iter->bvecs, iter->iter);
if (bv.bv_len + next.bv_len > max_size ||
return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+/**
+ * blk_rq_integrity_dma_map_iter_start - map the first integrity DMA segment
+ * for a request
+ * @req: request to map
+ * @dma_dev: device to map to
+ * @state: DMA IOVA state
+ * @iter: block layer DMA iterator
+ *
+ * Start DMA mapping @req integrity data to @dma_dev. @state and @iter are
+ * provided by the caller and don't need to be initialized. @state needs to be
+ * stored for use at unmap time, @iter is only needed at map time.
+ *
+ * Returns %false if there is no segment to map, including due to an error, or
+ * %true if it did map a segment.
+ *
+ * If a segment was mapped, the DMA address for it is returned in @iter.addr
+ * and the length in @iter.len. If no segment was mapped the status code is
+ * returned in @iter.status.
+ *
+ * The caller can call blk_rq_dma_map_coalesce() to check if further segments
+ * need to be mapped after this, or go straight to
+ * blk_rq_integrity_dma_map_iter_next() to try to map the following segments.
+ */
+bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter)
+{
+ /* Total integrity bytes for the request, derived from the queue limits. */
+ unsigned len = bio_integrity_bytes(&req->q->limits.integrity,
+ blk_rq_sectors(req));
+ struct bio *bio = req->bio;
+
+ /* Seed the iterator from the first bio's integrity payload. */
+ iter->iter = (struct blk_map_iter) {
+ .bio = bio,
+ .iter = bio_integrity(bio)->bip_iter,
+ .bvecs = bio_integrity(bio)->bip_vec,
+ .is_integrity = true,
+ };
+ return blk_dma_map_iter_start(req, dma_dev, state, iter, len);
+}
+EXPORT_SYMBOL_GPL(blk_rq_integrity_dma_map_iter_start);
+
+/**
+ * blk_rq_integrity_dma_map_iter_next - map the next integrity DMA segment for
+ * a request
+ * @req: request to map
+ * @dma_dev: device to map to
+ * @state: DMA IOVA state
+ * @iter: block layer DMA iterator
+ *
+ * Iterate to the next integrity mapping after a previous call to
+ * blk_rq_integrity_dma_map_iter_start(). See there for a detailed description
+ * of the arguments.
+ *
+ * Returns %false if there is no segment to map, including due to an error, or
+ * %true if it did map a segment.
+ *
+ * If a segment was mapped, the DMA address for it is returned in @iter.addr and
+ * the length in @iter.len. If no segment was mapped the status code is
+ * returned in @iter.status.
+ */
+bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter)
+{
+ struct phys_vec vec;
+
+ if (!blk_map_iter_next(req, &iter->iter, &vec))
+ return false;
+
+ /* P2P segments mapped by PCI bus address don't use blk_dma_map_direct(). */
+ if (iter->p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
+ return blk_dma_map_bus(iter, &vec);
+ return blk_dma_map_direct(req, dma_dev, iter, &vec);
+}
+EXPORT_SYMBOL_GPL(blk_rq_integrity_dma_map_iter_next);
+#endif
#include <linux/blk-mq.h>
#include <linux/bio-integrity.h>
+#include <linux/blk-mq-dma.h>
struct request;
ssize_t bytes);
int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
struct logical_block_metadata_cap __user *argp);
+/* Iterator-style DMA mapping of a request's integrity metadata. */
+bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter);
+bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter);
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
{
return -EINVAL;
}
+/*
+ * Stubs used when the integrity code is not built in: there is never any
+ * integrity metadata, so no segment can be mapped.
+ */
+static inline bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter)
+{
+ return false;
+}
+static inline bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter)
+{
+ return false;
+}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
return NULL;