kfree(data);
 }
 
-static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
-                                      struct iommu_iotlb_gather *gather,
-                                      unsigned long iova, size_t size,
-                                      arm_lpae_iopte blk_pte, int lvl,
-                                      arm_lpae_iopte *ptep, size_t pgcount)
-{
-       struct io_pgtable_cfg *cfg = &data->iop.cfg;
-       arm_lpae_iopte pte, *tablep;
-       phys_addr_t blk_paddr;
-       size_t tablesz = ARM_LPAE_GRANULE(data);
-       size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
-       int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
-       int i, unmap_idx_start = -1, num_entries = 0, max_entries;
-
-       if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
-               return 0;
-
-       tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
-       if (!tablep)
-               return 0; /* Bytes unmapped */
-
-       if (size == split_sz) {
-               unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
-               max_entries = arm_lpae_max_entries(unmap_idx_start, data);
-               num_entries = min_t(int, pgcount, max_entries);
-       }
-
-       blk_paddr = iopte_to_paddr(blk_pte, data);
-       pte = iopte_prot(blk_pte);
-
-       for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
-               /* Unmap! */
-               if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
-                       continue;
-
-               __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
-       }
-
-       pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
-       if (pte != blk_pte) {
-               __arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
-               /*
-                * We may race against someone unmapping another part of this
-                * block, but anything else is invalid. We can't misinterpret
-                * a page entry here since we're never at the last level.
-                */
-               if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
-                       return 0;
-
-               tablep = iopte_deref(pte, data);
-       } else if (unmap_idx_start >= 0) {
-               for (i = 0; i < num_entries; i++)
-                       io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);
-
-               return num_entries * size;
-       }
-
-       return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
-}
-
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                               struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t size, size_t pgcount,
@@ … @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                return i * size;
        } else if (iopte_leaf(pte, lvl, iop->fmt)) {
-               /*
-                * Insert a table at the next level to map the old region,
-                * minus the part we want to unmap
-                */
-               return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
-                                               lvl + 1, ptep, pgcount);
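+               /*
+                * Unmapping a subset of a block mapping is not supported;
+                * callers must unmap the same large IOPTE that was mapped.
+                */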
+               WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
+               return 0;
        }
 
        /* Keep on walkin' */
@@ … @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
                        iova += SZ_1G;
                }
 
-               /* Partial unmap */
-               size = 1UL << __ffs(cfg->pgsize_bitmap);
-               if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
-                       return __FAIL(ops, i);
-
-               /* Remap of partial unmap */
-               if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
-                                  IOMMU_READ, GFP_KERNEL, &mapped))
-                       return __FAIL(ops, i);
-
-               if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
-                       return __FAIL(ops, i);
-
                /* Full unmap */
                iova = 0;
                for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {