/* single segment dma mapping */
IOD_SINGLE_SEGMENT = 1U << 2,
+
+ /* DMA mapped with PCI_P2PDMA_MAP_BUS_ADDR */
+ IOD_P2P_BUS_ADDR = 1U << 3,
};
struct nvme_dma_vec {
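Context, not part of the patch: PCI_P2PDMA_MAP_BUS_ADDR means the
peer-to-peer payload is addressed directly with PCI bus addresses (traffic
stays below the host bridge), so the DMA API never mapped anything and there
is nothing to unmap; a PCI_P2PDMA_MAP_THRU_HOST_BRIDGE payload, by contrast,
is mapped like ordinary host memory. A minimal sketch of that distinction,
assuming the upstream enum pci_p2pdma_map_type from <linux/pci-p2pdma.h>;
the helper itself is invented for illustration:

	/* Sketch only: does this p2pdma map type leave DMA state behind? */
	static bool sketch_p2p_needs_unmap(enum pci_p2pdma_map_type type)
	{
		switch (type) {
		case PCI_P2PDMA_MAP_BUS_ADDR:
			return false;	/* raw bus addresses, nothing mapped */
		default:
			return true;	/* e.g. routed through the host bridge */
		}
	}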
return;
}
- if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
+ if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
+ iod->flags & IOD_P2P_BUS_ADDR)) {
if (nvme_pci_cmd_use_sgl(&iod->cmd))
nvme_free_sgls(req);
else

if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
return iter.status;
+ if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
+ iod->flags |= IOD_P2P_BUS_ADDR;
+
if (use_sgl == SGL_FORCED ||
(use_sgl == SGL_SUPPORTED &&
(sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
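The header side follows. The substantive change: blk_rq_dma_unmap() no
longer infers the skip case from request flags; the caller states whether
the payload was mapped with PCI bus addresses, a case the DMA API never saw
and the helper can skip outright. A condensed caller-side sketch under the
new signature, restating the NVMe hunks above (error handling elided):

	/* map side: record how the iterator mapped the payload */
	if (iter.p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
		iod->flags |= IOD_P2P_BUS_ADDR;

	/* unmap side: report it back; false means per-segment teardown */
	if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
			      iod->flags & IOD_P2P_BUS_ADDR))
		nvme_free_sgls(req);	/* or the PRP path, as above */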
* @dma_dev: device to unmap from
* @state: DMA IOVA state
* @mapped_len: number of bytes to unmap
+ * @is_p2p: true if mapped with PCI_P2PDMA_MAP_BUS_ADDR
*
* Returns %false if the caller needs to manually unmap every DMA segment
* mapped through the request's DMA iterator, or %true if no work is left
* to be done.
*/
static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, size_t mapped_len)
+ struct dma_iova_state *state, size_t mapped_len, bool is_p2p)
{
- if (req->cmd_flags & REQ_P2PDMA)
+ if (is_p2p)
return true;
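	/*
	 * Anything that was not a bus-address P2P mapping went through the
	 * DMA API proper: either one coalesced IOVA range (handled below),
	 * or individual segments left for the caller to unmap, per the
	 * %false return documented above.
	 */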
if (dma_use_iova(state)) {