static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;
-
-unsigned long iommu_getbypass(unsigned long ra,
- unsigned long attr,
- unsigned long *io_addr_p)
-{
- struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
- struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
- unsigned long devhandle = pbm->devhandle;
- unsigned long ret;
-
- ret = pci_sun4v_iommu_getbypass(devhandle, ra, attr, io_addr_p);
-
- printk(KERN_ERR "iommu_getbypass: devhandle 0x%lx ra 0x%lx prot 0x%lx dma 0x%lx\n",
- devhandle, ra, attr, *io_addr_p);
-
- if (ret)
- printk(KERN_ERR "iommu_getbypass: err 0x%lx\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL(iommu_getbypass);
-
/* Interrupts must be disabled. */
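+/*
+ * Sketch of the expected calling sequence (modeled on the
+ * dma_4v_alloc_coherent() path in this file; the locals and the
+ * error label are illustrative):
+ *
+ *	local_irq_save(flags);
+ *	iommu_batch_start(dev, prot, entry);
+ *	for (n = 0; n < npages; n++) {
+ *		err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+ *		if (unlikely(err < 0L))
+ *			goto iommu_map_fail;
+ *	}
+ *	if (unlikely(iommu_batch_end() < 0L))
+ *		goto iommu_map_fail;
+ *	local_irq_restore(flags);
+ */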
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
@@ ... @@ static void dma_4v_free_coherent(struct device *dev, size_t size,
	free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4v_map_page_bypass(struct device *dev, struct page *page,
- unsigned long offset, size_t sz,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- struct pci_pbm_info *pbm;
- unsigned long devhandle;
- unsigned long ra;
- unsigned long prot;
- unsigned long dma_addr;
-
- if (unlikely(direction == DMA_NONE))
- goto bad;
-
- prot = HV_PCI_MAP_ATTR_READ;
- if (direction != DMA_TO_DEVICE)
- prot |= HV_PCI_MAP_ATTR_WRITE;
-
- /* VPCI maj=2, min=[0,1] or greater supports relax ordering */
- if (dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs) && vpci_major >= 2)
- prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
-
- pbm = dev->archdata.host_controller;
- devhandle = pbm->devhandle;
- ra = __pa(page_address(page) + offset);
- if (pci_sun4v_iommu_getbypass(devhandle, ra, prot, &dma_addr))
- goto bad;
-
- return dma_addr;
-
-bad:
- return DMA_ERROR_CODE;
-}
-
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
@@ ... @@
-	if (IS_IB_DEVICE(pdev))
-		return dma_4v_map_page_bypass(dev, page, offset, sz,
-					      direction, attrs);
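+	/* Translated DMA path: a range of IOMMU TSB entries is allocated
+	 * with iommu_tbl_range_alloc() and programmed through the per-CPU
+	 * mapping batch (iommu_batch_start()/iommu_batch_end()).
+	 */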
iommu = dev->archdata.iommu;
@@ ... @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
	long entry;
	u32 devhandle;
-	/* IB uses bypass, no need to un-map bypass dma address */
-	if (IS_IB_DEVICE(pdev))
-		return;
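+	/* Tear-down mirrors the map path: the entries are demapped through
+	 * the hypervisor and the range is handed back to the IOMMU
+	 * allocator below.
+	 */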
@@ ... @@
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
-static int dma_4v_map_sg_bypass(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- struct pci_pbm_info *pbm;
- unsigned long devhandle;
- unsigned long ra;
- unsigned long prot;
- unsigned long dma_addr;
- struct scatterlist *s;
- int i;
-
- if (unlikely(direction == DMA_NONE))
- goto bad;
-
- prot = HV_PCI_MAP_ATTR_READ;
- if (direction != DMA_TO_DEVICE)
- prot |= HV_PCI_MAP_ATTR_WRITE;
-
- /* VPCI maj=2, min=[0,1] or greater supports relax ordering */
- if (dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs) && vpci_major >= 2)
- prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
-
- pbm = dev->archdata.host_controller;
- devhandle = pbm->devhandle;
- for_each_sg(sglist, s, nelems, i) {
- ra = (unsigned long)SG_ENT_PHYS_ADDRESS(s);
- if (pci_sun4v_iommu_getbypass(devhandle, ra, prot, &dma_addr))
- goto bad;
- s->dma_address = dma_addr;
- s->dma_length = s->length;
- }
-
- return nelems;
-
-bad:
- return 0;
-}
-
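+/* Map a scatterlist through the IOMMU TSB; adjacent entries are merged
+ * into larger DMA segments when size and boundary constraints allow.
+ */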
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
@@ ... @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
	unsigned long flags, entry;
u32 devhandle;
-	/* IB uses bypass, no need to un-map bypass dma address */
-	if (IS_IB_DEVICE(pdev))
-		return;