}
 EXPORT_SYMBOL_GPL(iommu_clear_tce);
 
-int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
-               unsigned long entry, unsigned long pages)
-{
-       unsigned long oldtce;
-       struct page *page;
-
-       for ( ; pages; --pages, ++entry) {
-               oldtce = iommu_clear_tce(tbl, entry);
-               if (!oldtce)
-                       continue;
-
-               page = pfn_to_page(oldtce >> PAGE_SHIFT);
-               WARN_ON(!page);
-               if (page) {
-                       if (oldtce & TCE_PCI_WRITE)
-                               SetPageDirty(page);
-                       put_page(page);
-               }
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(iommu_clear_tces_and_put_pages);
-
 /*
  * hwaddr is a kernel virtual address here (0xc... bazillion),
  * tce_build converts it to a physical address.
 }
 EXPORT_SYMBOL_GPL(iommu_tce_build);
 
-int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
-               unsigned long tce)
-{
-       int ret;
-       struct page *page = NULL;
-       unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
-       enum dma_data_direction direction = iommu_tce_direction(tce);
-
-       ret = get_user_pages_fast(tce & PAGE_MASK, 1,
-                       direction != DMA_TO_DEVICE, &page);
-       if (unlikely(ret != 1)) {
-               /* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
-                               tce, entry << tbl->it_page_shift, ret); */
-               return -EFAULT;
-       }
-       hwaddr = (unsigned long) page_address(page) + offset;
-
-       ret = iommu_tce_build(tbl, entry, hwaddr, direction);
-       if (ret)
-               put_page(page);
-
-       if (ret < 0)
-               pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
-                       __func__, entry << tbl->it_page_shift, tce, ret);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_put_tce_user_mode);
-
 int iommu_take_ownership(struct iommu_table *tbl)
 {
        unsigned long sz = (tbl->it_size + 7) >> 3;
        }
 
        memset(tbl->it_map, 0xff, sz);
-       iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
 
        /*
         * Disable iommu bypass, otherwise the user can DMA to all of
 {
        unsigned long sz = (tbl->it_size + 7) >> 3;
 
-       iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
        memset(tbl->it_map, 0, sz);
 
        /* Restore bit#0 set by iommu_init_table() */
 
        kfree(container);
 }
 
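+/*
+ * Clear TCE table entries [entry, entry + pages) and unpin the system
+ * pages they referenced; a page the device was allowed to write to
+ * (TCE_PCI_WRITE) is marked dirty before its reference is dropped.
+ */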
+static int tce_iommu_clear(struct tce_container *container,
+               struct iommu_table *tbl,
+               unsigned long entry, unsigned long pages)
+{
+       unsigned long oldtce;
+       struct page *page;
+
+       for ( ; pages; --pages, ++entry) {
+               oldtce = iommu_clear_tce(tbl, entry);
+               if (!oldtce)
+                       continue;
+
+               page = pfn_to_page(oldtce >> PAGE_SHIFT);
+               WARN_ON(!page);
+               if (page) {
+                       if (oldtce & TCE_PCI_WRITE)
+                               SetPageDirty(page);
+                       put_page(page);
+               }
+       }
+
+       return 0;
+}
+
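+/*
+ * Pin the user memory backing @pages TCEs starting at @tce and program
+ * the table entries starting at @entry. On any failure, the entries
+ * already programmed are cleared again and their pages unpinned.
+ */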
+static long tce_iommu_build(struct tce_container *container,
+               struct iommu_table *tbl,
+               unsigned long entry, unsigned long tce, unsigned long pages)
+{
+       long i, ret = 0;
+       struct page *page = NULL;
+       unsigned long hva;
+       enum dma_data_direction direction = iommu_tce_direction(tce);
+
+       for (i = 0; i < pages; ++i) {
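+               /* Offset of this IOMMU page within the backing system page */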
+               unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
+
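+               /* Pin one page; request write access unless the device may only read from it */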
+               ret = get_user_pages_fast(tce & PAGE_MASK, 1,
+                               direction != DMA_TO_DEVICE, &page);
+               if (unlikely(ret != 1)) {
+                       ret = -EFAULT;
+                       break;
+               }
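+               /*
+                * The pinned page is in the kernel linear mapping, so
+                * page_address() yields the kernel virtual address that
+                * iommu_tce_build() expects.
+                */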
+               hva = (unsigned long) page_address(page) + offset;
+
+               ret = iommu_tce_build(tbl, entry + i, hva, direction);
+               if (ret) {
+                       put_page(page);
+                       pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
+                                       __func__,
+                                       (entry + i) << tbl->it_page_shift,
+                                       tce, ret);
+                       break;
+               }
+               tce += IOMMU_PAGE_SIZE_4K;
+       }
+
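+       /* Roll back: unpin and clear whatever was mapped before the failure */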
+       if (ret)
+               tce_iommu_clear(container, tbl, entry, i);
+
+       return ret;
+}
+
 static long tce_iommu_ioctl(void *iommu_data,
                                 unsigned int cmd, unsigned long arg)
 {
        case VFIO_IOMMU_MAP_DMA: {
                struct vfio_iommu_type1_dma_map param;
                struct iommu_table *tbl = container->tbl;
-               unsigned long tce, i;
+               unsigned long tce;
 
                if (!tbl)
                        return -ENXIO;
                if (ret)
                        return ret;
 
-               for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
-                       ret = iommu_put_tce_user_mode(tbl,
-                                       (param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
-                                       tce);
-                       if (ret)
-                               break;
-                       tce += IOMMU_PAGE_SIZE_4K;
-               }
-               if (ret)
-                       iommu_clear_tces_and_put_pages(tbl,
-                                       param.iova >> IOMMU_PAGE_SHIFT_4K, i);
+               ret = tce_iommu_build(container, tbl,
+                               param.iova >> IOMMU_PAGE_SHIFT_4K,
+                               tce, param.size >> IOMMU_PAGE_SHIFT_4K);
 
                iommu_flush_tce(tbl);
 
                if (ret)
                        return ret;
 
-               ret = iommu_clear_tces_and_put_pages(tbl,
+               ret = tce_iommu_clear(container, tbl,
                                param.iova >> IOMMU_PAGE_SHIFT_4K,
                                param.size >> IOMMU_PAGE_SHIFT_4K);
                iommu_flush_tce(tbl);
                /* pr_debug("tce_vfio: detaching group #%u from iommu %p\n",
                                iommu_group_id(iommu_group), iommu_group); */
                container->tbl = NULL;
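+               /* Unpin everything still mapped before handing the table back */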
+               tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                iommu_release_ownership(tbl);
        }
        mutex_unlock(&container->lock);