blk-mq-dma: require unmap caller provide p2p map type
author     Keith Busch <kbusch@kernel.org>
           Wed, 13 Aug 2025 15:31:47 +0000 (08:31 -0700)
committer  Jens Axboe <axboe@kernel.dk>
           Mon, 25 Aug 2025 13:44:39 +0000 (07:44 -0600)
In preparation for integrity DMA mappings, we can't rely on the REQ_P2PDMA
request flag because data and metadata may have different mapping types.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20250813153153.3260897-4-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/host/pci.c
include/linux/blk-mq-dma.h
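
For context, the calling convention this patch establishes looks roughly like the
sketch below, condensed from the nvme/pci.c hunks that follow: record the P2P
mapping type when the request is mapped, and hand it back to blk_rq_dma_unmap()
instead of checking REQ_P2PDMA. The mydrv_* names, struct mydrv_iod, and the
p2p_bus_addr flag are illustrative stand-ins, not part of the patch; only the
blk-mq-dma.h helpers, iter.p2pdma.map, and PCI_P2PDMA_MAP_BUS_ADDR are taken
from the code being changed here.

#include <linux/blk-mq.h>
#include <linux/blk-mq-dma.h>
#include <linux/pci-p2pdma.h>

/* Hypothetical per-request driver state; nvme-pci keeps this in struct nvme_iod. */
struct mydrv_iod {
	struct dma_iova_state dma_state;
	size_t total_len;
	bool p2p_bus_addr;	/* mapped with PCI_P2PDMA_MAP_BUS_ADDR */
};

static blk_status_t mydrv_map_data(struct request *req, struct device *dma_dev,
		struct mydrv_iod *iod)
{
	struct blk_dma_iter iter;

	if (!blk_rq_dma_map_iter_start(req, dma_dev, &iod->dma_state, &iter))
		return iter.status;

	/* Remember the mapping type at map time ... */
	if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
		iod->p2p_bus_addr = true;

	/*
	 * ... then consume iter.addr/iter.len (advancing with
	 * blk_rq_dma_map_iter_next()) to build the hardware descriptors.
	 */
	return BLK_STS_OK;
}

static void mydrv_unmap_data(struct request *req, struct device *dma_dev,
		struct mydrv_iod *iod)
{
	/* ... and hand it back at unmap time instead of testing REQ_P2PDMA. */
	if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
			      iod->p2p_bus_addr)) {
		/*
		 * Neither IOVA-coalesced nor a P2P bus-address mapping: the
		 * driver must unmap each DMA segment it recorded itself.
		 */
	}
}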

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2c6d9506b172509fb35716eba456c375f52f5b86..111b6bc6c93eb1142484b66883c482795ae14089 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -261,6 +261,9 @@ enum nvme_iod_flags {
 
        /* single segment dma mapping */
        IOD_SINGLE_SEGMENT      = 1U << 2,
+
+       /* DMA mapped with PCI_P2PDMA_MAP_BUS_ADDR */
+       IOD_P2P_BUS_ADDR        = 1U << 3,
 };
 
 struct nvme_dma_vec {
@@ -725,7 +728,8 @@ static void nvme_unmap_data(struct request *req)
                return;
        }
 
-       if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
+       if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
+                               iod->flags & IOD_P2P_BUS_ADDR)) {
                if (nvme_pci_cmd_use_sgl(&iod->cmd))
                        nvme_free_sgls(req);
                else
@@ -1000,6 +1004,9 @@ static blk_status_t nvme_map_data(struct request *req)
        if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
                return iter.status;
 
+       if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
+               iod->flags |= IOD_P2P_BUS_ADDR;
+
        if (use_sgl == SGL_FORCED ||
            (use_sgl == SGL_SUPPORTED &&
             (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index e5cb5e46fc928e50273824944469ca3df19a2027..881880095e0da903c2ae83447eb1b32b081aaba5 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -47,14 +47,15 @@ static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
  * @dma_dev:   device to unmap from
  * @state:     DMA IOVA state
  * @mapped_len: number of bytes to unmap
+ * @is_p2p:    true if mapped with PCI_P2PDMA_MAP_BUS_ADDR
  *
  * Returns %false if the callers need to manually unmap every DMA segment
  * mapped using @iter or %true if no work is left to be done.
  */
 static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
-               struct dma_iova_state *state, size_t mapped_len)
+               struct dma_iova_state *state, size_t mapped_len, bool is_p2p)
 {
-       if (req->cmd_flags & REQ_P2PDMA)
+       if (is_p2p)
                return true;
 
        if (dma_use_iova(state)) {