  * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
  * hugetlbfs is in use.
  */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
 {
-       struct page *pages;
        int ret, order = get_order(PAGE_SIZE * 2);
+       struct vfio_iova *region;
+       struct page *pages;
+       dma_addr_t start;
 
        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!pages)
                return;
 
-       ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
-                       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
-       if (!ret) {
-               size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+       list_for_each_entry(region, regions, list) {
+               start = ALIGN(region->start, PAGE_SIZE * 2);
+               if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
+                       continue;
 
-               if (unmapped == PAGE_SIZE)
-                       iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
-               else
-                       domain->fgsp = true;
+               ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
+                               IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+               if (!ret) {
+                       size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
+
+                       if (unmapped == PAGE_SIZE)
+                               iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
+                       else
+                               domain->fgsp = true;
+               }
+               break;
        }
 
        __free_pages(pages, order);
 }
@@ ... @@ static int vfio_iommu_type1_attach_group(struct vfio_iommu *iommu,
                }
        }
 
-       vfio_test_domain_fgsp(domain);
+       vfio_test_domain_fgsp(domain, &iova_copy);
 
        /* replay mappings on new domains */
        ret = vfio_iommu_replay(iommu, domain);
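
Note: the region-selection logic added above can be sanity-checked in
isolation. The following is a minimal userspace sketch, not kernel code: it
assumes 4 KiB pages, a local ALIGN() mirroring the kernel macro, and made-up
example ranges, and only demonstrates which valid IOVA region the loop would
pick for the two-page probe mapping.

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Same round-up the kernel's ALIGN() macro performs for power-of-two 'a'. */
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

struct vfio_iova {
        uint64_t start;
        uint64_t end;   /* exclusive */
};

int main(void)
{
        /* Hypothetical valid IOVA ranges left after reserved regions are
         * carved out; real lists come from the iommu group's dma ranges. */
        struct vfio_iova regions[] = {
                { 0x0000,  0x1000 },    /* one page: too small for the probe */
                { 0x9000,  0xa000 },    /* aligning 0x9000 up to 0xa000 leaves nothing */
                { 0xe000, 0x20000 },    /* first usable region: probe lands at 0xe000 */
        };
        size_t i;

        for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
                uint64_t start = ALIGN(regions[i].start, PAGE_SIZE * 2);

                /* Mirrors the patch: skip regions that cannot hold an
                 * aligned PAGE_SIZE * 2 test mapping. */
                if (start >= regions[i].end ||
                    regions[i].end - start < PAGE_SIZE * 2) {
                        printf("[0x%" PRIx64 ", 0x%" PRIx64 "): skipped\n",
                               regions[i].start, regions[i].end);
                        continue;
                }
                printf("[0x%" PRIx64 ", 0x%" PRIx64 "): probe at 0x%" PRIx64 "\n",
                       regions[i].start, regions[i].end, start);
                break;  /* like the patch, only the first suitable region is tried */
        }
        return 0;
}

As in the kernel loop, the break is unconditional once a region passes the
size check, so only the first suitable range is ever probed, even if the
iommu_map() inside it were to fail.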