WARN_ON_ONCE(bio_full(bio, len));
if (is_pci_p2pdma_page(page))
- bio->bi_opf |= REQ_P2PDMA | REQ_NOMERGE;
+ bio->bi_opf |= REQ_NOMERGE;
bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
bio->bi_iter.bi_size += len;
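
The hunk above appears to be __bio_add_page() in block/bio.c. A P2P page added to a bio no longer tags the bio with REQ_P2PDMA; only REQ_NOMERGE survives, so a bio carrying peer-to-peer memory is still never merged, since merging could mix P2P and host pages in one request. The flag can go because being P2P memory is a property of the page itself, which the mapping path can test directly. A minimal sketch of that per-page test, modeled on is_pci_p2pdma_page() (the pgmap accessor spelling varies across kernel versions):

/*
 * Sketch only: P2P pages are ZONE_DEVICE pages whose pgmap is typed
 * MEMORY_DEVICE_PCI_P2PDMA, so no per-request flag is required.
 */
static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		is_zone_device_page(page) &&
		page_pgmap(page)->type == MEMORY_DEVICE_PCI_P2PDMA;
}
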
if (!blk_map_iter_next(req, &iter->iter, &vec))
return false;
- if (IS_ENABLED(CONFIG_PCI_P2PDMA) && (req->cmd_flags & REQ_P2PDMA)) {
- switch (pci_p2pdma_state(&iter->p2pdma, dma_dev,
- phys_to_page(vec.paddr))) {
- case PCI_P2PDMA_MAP_BUS_ADDR:
- return blk_dma_map_bus(iter, &vec);
- case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
- /*
- * P2P transfers through the host bridge are treated the
- * same as non-P2P transfers below and during unmap.
- */
- req->cmd_flags &= ~REQ_P2PDMA;
- break;
- default:
- iter->status = BLK_STS_INVAL;
- return false;
- }
+ switch (pci_p2pdma_state(&iter->p2pdma, dma_dev,
+ phys_to_page(vec.paddr))) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ return blk_dma_map_bus(iter, &vec);
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * P2P transfers through the host bridge are treated the
+ * same as non-P2P transfers below and during unmap.
+ */
+ case PCI_P2PDMA_MAP_NONE:
+ break;
+ default:
+ iter->status = BLK_STS_INVAL;
+ return false;
}
if (blk_can_dma_map_iova(req, dma_dev) &&
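
This hunk appears to be blk_rq_dma_map_iter_start() in block/blk-mq-dma.c. Rather than gating the classification on IS_ENABLED(CONFIG_PCI_P2PDMA) and REQ_P2PDMA, the code now always asks pci_p2pdma_state() about the first segment and handles the PCI_P2PDMA_MAP_NONE answer it gives for plain host memory; PCI_P2PDMA_MAP_THRU_HOST_BRIDGE now shares that break instead of clearing REQ_P2PDMA. This stays cheap because the helper short-circuits for non-P2P pages. A sketch of the contract this relies on, written from how the helper reads in recent kernels (__pci_p2pdma_update_state() is its out-of-line slow path; treat the details as approximate):

static inline enum pci_p2pdma_map_type
pci_p2pdma_state(struct pci_p2pdma_map_state *state, struct device *dev,
		 struct page *page)
{
	if (IS_ENABLED(CONFIG_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
		/* Cache the mapping type per pgmap; a bio's segments share it. */
		if (state->pgmap != page_pgmap(page))
			__pci_p2pdma_update_state(state, dev, page);
		return state->map;
	}
	/* Plain host memory: nothing P2P-specific to do. */
	return PCI_P2PDMA_MAP_NONE;
}

When CONFIG_PCI_P2PDMA is disabled, is_pci_p2pdma_page() is constant-false, pci_p2pdma_state() constant-folds to PCI_P2PDMA_MAP_NONE, and the compiler can drop the other switch arms, so removing the explicit IS_ENABLED() guard costs nothing.
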
__REQ_DRV, /* for driver use */
__REQ_FS_PRIVATE, /* for file system (submitter) use */
__REQ_ATOMIC, /* for atomic write operations */
- __REQ_P2PDMA, /* contains P2P DMA pages */
/*
* Command specific flags, keep last:
*/
#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
-#define REQ_P2PDMA (__force blk_opf_t)(1ULL << __REQ_P2PDMA)
#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
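
The last hunk removes the flag from include/linux/blk_types.h; note that REQ_NOUNMAP and its enum bit stay, so __REQ_NOUNMAP and __REQ_NR_BITS merely shift down by one, which is harmless for these internal, never-persisted bit values. Drivers consuming the request never needed the flag either: they walk the DMA iterator and receive each mapped range with P2P vs. host memory already resolved per page. A hedged usage sketch in the style of nvme-pci (dma_dev, state, nvmeq and prog_sgl() are hypothetical placeholders, and the iterator field names are from memory):

struct blk_dma_iter iter;

if (!blk_rq_dma_map_iter_start(req, dma_dev, state, &iter))
	return iter.status;
do {
	/* One mapped DMA range per iteration; no REQ_P2PDMA test anywhere. */
	prog_sgl(nvmeq, iter.addr, iter.len);
} while (blk_rq_dma_map_iter_next(req, dma_dev, state, &iter));
if (iter.status != BLK_STS_OK)
	return iter.status;
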