void *vaddr;
};
+typedef u64 iova_t;
+
+#define INVALID_IOVA UINT64_MAX
+
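+/*
+ * A single DMA mapping: the host virtual address backing the region, the
+ * IOVA it is mapped at, and its size. Linked into a device's dma_regions
+ * list by vfio_pci_dma_map().
+ */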
+struct vfio_dma_region {
+ struct list_head link;
+ void *vaddr;
+ iova_t iova;
+ u64 size;
+};
+
struct vfio_pci_device {
int fd;
int group_fd;
struct vfio_irq_info msi_info;
struct vfio_irq_info msix_info;
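+ /* DMA regions mapped into the device, used for vaddr -> IOVA translation */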
+ struct list_head dma_regions;
+
/* eventfds for MSI and MSI-x interrupts */
int msi_eventfds[PCI_MSIX_FLAGS_QSIZE + 1];
};
void vfio_pci_device_cleanup(struct vfio_pci_device *device);
void vfio_pci_device_reset(struct vfio_pci_device *device);
-void vfio_pci_dma_map(struct vfio_pci_device *device, u64 iova, u64 size,
- void *vaddr);
-void vfio_pci_dma_unmap(struct vfio_pci_device *device, u64 iova, u64 size);
+void vfio_pci_dma_map(struct vfio_pci_device *device,
+ struct vfio_dma_region *region);
+void vfio_pci_dma_unmap(struct vfio_pci_device *device,
+ struct vfio_dma_region *region);
void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
size_t config, size_t size, void *data);
vfio_pci_irq_disable(device, VFIO_PCI_MSIX_IRQ_INDEX);
}
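+/*
+ * Translate a host virtual address inside a mapped DMA region into its IOVA.
+ * __to_iova() returns INVALID_IOVA if vaddr is not mapped into the device;
+ * to_iova() asserts that the translation succeeds.
+ */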
+iova_t __to_iova(struct vfio_pci_device *device, void *vaddr);
+iova_t to_iova(struct vfio_pci_device *device, void *vaddr);
+
#endif /* SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H */
VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
} while (0)
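+
+/*
+ * Walk the device's DMA regions looking for one that contains vaddr and
+ * return the matching IOVA, or INVALID_IOVA if no region covers it.
+ */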
+iova_t __to_iova(struct vfio_pci_device *device, void *vaddr)
+{
+ struct vfio_dma_region *region;
+
+ list_for_each_entry(region, &device->dma_regions, link) {
+ if (vaddr < region->vaddr)
+ continue;
+
+ if (vaddr >= region->vaddr + region->size)
+ continue;
+
+ return region->iova + (vaddr - region->vaddr);
+ }
+
+ return INVALID_IOVA;
+}
+
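+/* As __to_iova(), but asserts that vaddr is mapped into the device. */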
+iova_t to_iova(struct vfio_pci_device *device, void *vaddr)
+{
+ iova_t iova;
+
+ iova = __to_iova(device, vaddr);
+ VFIO_ASSERT_NE(iova, INVALID_IOVA, "%p is not mapped into device.\n", vaddr);
+
+ return iova;
+}
+
static void vfio_pci_irq_set(struct vfio_pci_device *device,
u32 index, u32 vector, u32 count, int *fds)
{
ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info);
}
-void vfio_pci_dma_map(struct vfio_pci_device *device, u64 iova, u64 size, void *vaddr)
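+/*
+ * Map region->vaddr at region->iova for DMA and add the region to the
+ * device's dma_regions list so to_iova() can translate it later.
+ */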
+void vfio_pci_dma_map(struct vfio_pci_device *device,
+ struct vfio_dma_region *region)
{
struct vfio_iommu_type1_dma_map map = {
.argsz = sizeof(map),
.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
- .vaddr = (u64)vaddr,
- .iova = iova,
- .size = size,
+ .vaddr = (u64)region->vaddr,
+ .iova = region->iova,
+ .size = region->size,
};
ioctl_assert(device->container_fd, VFIO_IOMMU_MAP_DMA, &map);
+
+ list_add(&region->link, &device->dma_regions);
}
-void vfio_pci_dma_unmap(struct vfio_pci_device *device, u64 iova, u64 size)
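+/*
+ * Unmap the region from the device and drop it from the dma_regions list.
+ * The region must previously have been mapped with vfio_pci_dma_map().
+ */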
+void vfio_pci_dma_unmap(struct vfio_pci_device *device,
+ struct vfio_dma_region *region)
{
struct vfio_iommu_type1_dma_unmap unmap = {
.argsz = sizeof(unmap),
- .iova = iova,
- .size = size,
+ .iova = region->iova,
+ .size = region->size,
};
ioctl_assert(device->container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
+
+ list_del(&region->link);
}
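+
+/*
+ * Typical map/translate/unmap flow (sketch; buf and size are hypothetical):
+ *
+ *	struct vfio_dma_region region = {
+ *		.vaddr = buf,
+ *		.iova = (u64)buf,
+ *		.size = size,
+ *	};
+ *
+ *	vfio_pci_dma_map(device, &region);
+ *	... program the device with to_iova(device, buf) ...
+ *	vfio_pci_dma_unmap(device, &region);
+ */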
static void vfio_pci_region_get(struct vfio_pci_device *device, int index,
{
int ret;
+ INIT_LIST_HEAD(&device->dma_regions);
+
ret = ioctl(device->container_fd, VFIO_CHECK_EXTENSION, iommu_type);
VFIO_ASSERT_GT(ret, 0, "VFIO IOMMU type %lu not supported\n", iommu_type);
{
const u64 size = variant->size ?: getpagesize();
const int flags = variant->mmap_flags;
+ struct vfio_dma_region region;
struct iommu_mapping mapping;
- void *mem;
- u64 iova;
int rc;
- mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
+ region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
/* Skip the test if there aren't enough HugeTLB pages available. */
- if (flags & MAP_HUGETLB && mem == MAP_FAILED)
+ if (flags & MAP_HUGETLB && region.vaddr == MAP_FAILED)
SKIP(return, "mmap() failed: %s (%d)\n", strerror(errno), errno);
else
- ASSERT_NE(mem, MAP_FAILED);
+ ASSERT_NE(region.vaddr, MAP_FAILED);
- iova = (u64)mem;
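+ /* Identity-map the buffer: use its host virtual address as the IOVA. */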
+ region.iova = (u64)region.vaddr;
+ region.size = size;
- vfio_pci_dma_map(self->device, iova, size, mem);
- printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", mem, size, iova);
+ vfio_pci_dma_map(self->device, &region);
+ printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);
- rc = iommu_mapping_get(device_bdf, iova, &mapping);
+ ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));
+
+ rc = iommu_mapping_get(device_bdf, region.iova, &mapping);
if (rc == -EOPNOTSUPP)
goto unmap;
ASSERT_EQ(0, rc);
- printf("Found IOMMU mappings for IOVA 0x%lx:\n", iova);
+ printf("Found IOMMU mappings for IOVA 0x%lx:\n", region.iova);
printf("PGD: 0x%016lx\n", mapping.pgd);
printf("P4D: 0x%016lx\n", mapping.p4d);
printf("PUD: 0x%016lx\n", mapping.pud);
}
unmap:
- vfio_pci_dma_unmap(self->device, iova, size);
- printf("Unmapped IOVA 0x%lx\n", iova);
- ASSERT_NE(0, iommu_mapping_get(device_bdf, iova, &mapping));
+ vfio_pci_dma_unmap(self->device, &region);
+ printf("Unmapped IOVA 0x%lx\n", region.iova);
+ ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr));
+ ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));
- ASSERT_TRUE(!munmap(mem, size));
+ ASSERT_TRUE(!munmap(region.vaddr, size));
}
int main(int argc, char *argv[])