 extern int iommu_is_off;
 extern int iommu_force_on;
 
+struct iommu_table_ops {
+       /*
+        * set() programs npages TCEs starting at index so they translate
+        * to uaddr; clear() invalidates the same range; get() reads one
+        * entry back; flush() pushes pending updates to hardware.
+        * get() and flush() are optional and NULL-checked by the core.
+        */
+       int (*set)(struct iommu_table *tbl,
+                       long index, long npages,
+                       unsigned long uaddr,
+                       enum dma_data_direction direction,
+                       struct dma_attrs *attrs);
+       void (*clear)(struct iommu_table *tbl,
+                       long index, long npages);
+       unsigned long (*get)(struct iommu_table *tbl, long index);
+       void (*flush)(struct iommu_table *tbl);
+};
+
+/* These are used by VIO */
+extern struct iommu_table_ops iommu_table_lpar_multi_ops;
+extern struct iommu_table_ops iommu_table_pseries_ops;
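Taken together, this header hunk replaces the global ppc_md.tce_* hooks with a per-table vtable: each iommu_table now carries the callbacks of the platform that owns it. As a rough sketch of the intended usage (the "myplat" names below are invented for illustration and appear nowhere in the patch), a backend fills one static ops structure and points its table at it before calling iommu_init_table():

/* Illustration only: a hypothetical "myplat" backend. */
static int myplat_tce_set(struct iommu_table *tbl, long index, long npages,
                          unsigned long uaddr,
                          enum dma_data_direction direction,
                          struct dma_attrs *attrs)
{
        /* program npages TCEs starting at index to translate to uaddr */
        return 0;
}

static void myplat_tce_clear(struct iommu_table *tbl, long index, long npages)
{
        /* invalidate the same range */
}

static struct iommu_table_ops myplat_iommu_ops = {
        .set    = myplat_tce_set,
        .clear  = myplat_tce_clear,
        /* .get and .flush may stay NULL; the core checks before calling */
};

static void myplat_setup_table(struct iommu_table *tbl, int nid)
{
        tbl->it_ops = &myplat_iommu_ops;  /* must be set first... */
        iommu_init_table(tbl, nid);       /* ...as init now BUG_ON()s a NULL it_ops */
}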
+
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
  * of dma space we can get.  IOMAP_MAX_ORDER = 13
 #ifdef CONFIG_IOMMU_API
        struct iommu_group *it_group;
 #endif
+       struct iommu_table_ops *it_ops;
        void (*set_bypass)(struct iommu_table *tbl, bool enable);
 #ifdef CONFIG_PPC_POWERNV
        void           *data;
 
         * destroyed as well */
        void            (*hpte_clear_all)(void);
 
-       int             (*tce_build)(struct iommu_table *tbl,
-                                    long index,
-                                    long npages,
-                                    unsigned long uaddr,
-                                    enum dma_data_direction direction,
-                                    struct dma_attrs *attrs);
-       void            (*tce_free)(struct iommu_table *tbl,
-                                   long index,
-                                   long npages);
-       unsigned long   (*tce_get)(struct iommu_table *tbl,
-                                   long index);
-       void            (*tce_flush)(struct iommu_table *tbl);
-
-       /* _rm versions are for real mode use only */
-       int             (*tce_build_rm)(struct iommu_table *tbl,
-                                    long index,
-                                    long npages,
-                                    unsigned long uaddr,
-                                    enum dma_data_direction direction,
-                                    struct dma_attrs *attrs);
-       void            (*tce_free_rm)(struct iommu_table *tbl,
-                                   long index,
-                                   long npages);
-       void            (*tce_flush_rm)(struct iommu_table *tbl);
-
        void __iomem *  (*ioremap)(phys_addr_t addr, unsigned long size,
                                   unsigned long flags, void *caller);
        void            (*iounmap)(volatile void __iomem *token);
 
        ret = entry << tbl->it_page_shift;      /* Set the return dma address */
 
        /* Put the TCEs in the HW table */
-       build_fail = ppc_md.tce_build(tbl, entry, npages,
+       build_fail = tbl->it_ops->set(tbl, entry, npages,
                                      (unsigned long)page &
                                      IOMMU_PAGE_MASK(tbl), direction, attrs);
 
-       /* ppc_md.tce_build() only returns non-zero for transient errors.
+       /* tbl->it_ops->set() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
         * DMA_ERROR_CODE. For all other errors the functionality is
         * not altered.
        }
 
        /* Flush/invalidate TLB caches if necessary */
-       if (ppc_md.tce_flush)
-               ppc_md.tce_flush(tbl);
+       if (tbl->it_ops->flush)
+               tbl->it_ops->flush(tbl);
 
        /* Make sure updates are seen by hardware */
        mb();
        if (!iommu_free_check(tbl, dma_addr, npages))
                return;
 
-       ppc_md.tce_free(tbl, entry, npages);
+       tbl->it_ops->clear(tbl, entry, npages);
 
        spin_lock_irqsave(&(pool->lock), flags);
        bitmap_clear(tbl->it_map, free_entry, npages);
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
-       if (ppc_md.tce_flush)
-               ppc_md.tce_flush(tbl);
+       if (tbl->it_ops->flush)
+               tbl->it_ops->flush(tbl);
 }
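The iommu.c conversion here (and in the hunks that follow) is mechanical: every ppc_md.tce_build/tce_free/tce_flush/tce_get call becomes a dereference through the table's own ops. The one behavioural subtlety is which hooks may be absent; restated as a condensed helper (the name is invented, this is not code from the patch), the dispatch pattern is:

/* set() and clear() are mandatory and called directly;
 * get() and flush() are optional and NULL-checked first.
 */
static void example_tce_flush(struct iommu_table *tbl)
{
        if (tbl->it_ops->flush)
                tbl->it_ops->flush(tbl);
        mb();   /* make sure the updates are seen by hardware */
}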
 
 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                            npages, entry, dma_addr);
 
                /* Insert into HW table */
-               build_fail = ppc_md.tce_build(tbl, entry, npages,
+               build_fail = tbl->it_ops->set(tbl, entry, npages,
                                              vaddr & IOMMU_PAGE_MASK(tbl),
                                              direction, attrs);
                if(unlikely(build_fail))
        }
 
        /* Flush/invalidate TLB caches if necessary */
-       if (ppc_md.tce_flush)
-               ppc_md.tce_flush(tbl);
+       if (tbl->it_ops->flush)
+               tbl->it_ops->flush(tbl);
 
        DBG("mapped %d elements:\n", outcount);
 
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
-       if (ppc_md.tce_flush)
-               ppc_md.tce_flush(tbl);
+       if (tbl->it_ops->flush)
+               tbl->it_ops->flush(tbl);
 }
 
 static void iommu_table_clear(struct iommu_table *tbl)
         */
        if (!is_kdump_kernel() || is_fadump_active()) {
                /* Clear the table in case firmware left allocations in it */
-               ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
+               tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
                return;
        }
 
 #ifdef CONFIG_CRASH_DUMP
-       if (ppc_md.tce_get) {
+       if (tbl->it_ops->get) {
                unsigned long index, tceval, tcecount = 0;
 
                /* Reserve the existing mappings left by the first kernel. */
                for (index = 0; index < tbl->it_size; index++) {
-                       tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
+                       tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
        unsigned int i;
        struct iommu_pool *p;
 
+       /* every table must now carry its own callbacks */
+       BUG_ON(!tbl->it_ops);
+
        /* number of bytes needed for the bitmap */
        sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 
 void iommu_flush_tce(struct iommu_table *tbl)
 {
        /* Flush/invalidate TLB caches if necessary */
-       if (ppc_md.tce_flush)
-               ppc_md.tce_flush(tbl);
+       if (tbl->it_ops->flush)
+               tbl->it_ops->flush(tbl);
 
        /* Make sure updates are seen by hardware */
        mb();
                unsigned long ioba, unsigned long tce_value,
                unsigned long npages)
 {
-       /* ppc_md.tce_free() does not support any value but 0 */
+       /* tbl->it_ops->clear() does not support any value but 0 */
        if (tce_value)
                return -EINVAL;
 
 
        spin_lock(&(pool->lock));
 
-       oldtce = ppc_md.tce_get(tbl, entry);
+       oldtce = tbl->it_ops->get(tbl, entry);
        if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
-               ppc_md.tce_free(tbl, entry, 1);
+               tbl->it_ops->clear(tbl, entry, 1);
        else
                oldtce = 0;
 
 
        spin_lock(&(pool->lock));
 
-       oldtce = ppc_md.tce_get(tbl, entry);
+       oldtce = tbl->it_ops->get(tbl, entry);
        /* Add new entry if it is not busy */
        if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
-               ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);
+               ret = tbl->it_ops->set(tbl, entry, 1, hwaddr, direction, NULL);
 
        spin_unlock(&(pool->lock));
 
 
        tbl->it_type = TCE_VB;
        tbl->it_blocksize = 16;
 
+       if (firmware_has_feature(FW_FEATURE_LPAR))
+               tbl->it_ops = &iommu_table_lpar_multi_ops;
+       else
+               tbl->it_ops = &iommu_table_pseries_ops;
+
        return iommu_init_table(tbl, -1);
 }
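This VIO hunk is the reason the two pseries ops tables are exported ("These are used by VIO" in the header hunk): vio.c cannot reach the static tce_* callbacks directly, so it selects a whole ops structure by firmware feature, mirroring the LPAR test used later when the PCI DMA setup hooks are wired up.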
 
 
        return *ioid;
 }
 
+static struct iommu_table_ops cell_iommu_ops = {
+       .set = tce_build_cell,
+       .clear = tce_free_cell
+};
+
 static struct iommu_window * __init
 cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
                        unsigned long offset, unsigned long size,
        window->table.it_offset =
                (offset >> window->table.it_page_shift) + pte_offset;
        window->table.it_size = size >> window->table.it_page_shift;
+       window->table.it_ops = &cell_iommu_ops;
 
        iommu_init_table(&window->table, iommu->nid);
 
        /* Setup various callbacks */
        cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
        ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
-       ppc_md.tce_build = tce_build_cell;
-       ppc_md.tce_free = tce_free_cell;
 
        if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
                goto bail;
 
        }
 }
 
+static struct iommu_table_ops iommu_table_iobmap_ops = {
+       .set = iobmap_build,
+       .clear  = iobmap_free
+};
 
 static void iommu_table_iobmap_setup(void)
 {
         * Should probably be 8 (64 bytes)
         */
        iommu_table_iobmap.it_blocksize = 4;
+       iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops;
        iommu_init_table(&iommu_table_iobmap, 0);
        pr_debug(" <- %s\n", __func__);
 }
 
        pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
        pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
-       ppc_md.tce_build = iobmap_build;
-       ppc_md.tce_free  = iobmap_free;
        set_pci_dma_ops(&dma_iommu_ops);
 }
 
 
         */
 }
 
+static struct iommu_table_ops pnv_ioda1_iommu_ops = {
+       .set = pnv_tce_build,
+       .clear = pnv_tce_free,
+       .get = pnv_tce_get,
+};
+
 static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
                                         struct iommu_table *tbl,
                                         __be64 *startp, __be64 *endp, bool rm)
                pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm);
 }
 
+static struct iommu_table_ops pnv_ioda2_iommu_ops = {
+       .set = pnv_tce_build,
+       .clear = pnv_tce_free,
+       .get = pnv_tce_get,
+};
+
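pnv_ioda1_iommu_ops and pnv_ioda2_iommu_ops are identical at this point (as is the p5ioc2 variant below); keeping three separate structures costs almost nothing and, presumably, leaves room for the PHB types to grow different callbacks without touching shared code.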
 static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
                                      struct pnv_ioda_pe *pe, unsigned int base,
                                      unsigned int segs)
                                 TCE_PCI_SWINV_FREE   |
                                 TCE_PCI_SWINV_PAIR);
        }
+       tbl->it_ops = &pnv_ioda1_iommu_ops;
        iommu_init_table(tbl, phb->hose->node);
 
        if (pe->flags & PNV_IODA_PE_DEV) {
                                8);
                tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
        }
+       tbl->it_ops = &pnv_ioda2_iommu_ops;
        iommu_init_table(tbl, phb->hose->node);
 
        if (pe->flags & PNV_IODA_PE_DEV) {
 
 static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { }
 #endif /* CONFIG_PCI_MSI */
 
+static struct iommu_table_ops pnv_p5ioc2_iommu_ops = {
+       .set = pnv_tce_build,
+       .clear = pnv_tce_free,
+       .get = pnv_tce_get,
+};
+
 static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
                                         struct pci_dev *pdev)
 {
        if (phb->p5ioc2.iommu_table.it_map == NULL) {
+               phb->p5ioc2.iommu_table.it_ops = &pnv_p5ioc2_iommu_ops;
                iommu_init_table(&phb->p5ioc2.iommu_table, phb->hose->node);
                iommu_register_group(&phb->p5ioc2.iommu_table,
                                pci_domain_nr(phb->hose->bus), phb->opal_id);
 
        .write = pnv_pci_write_config,
 };
 
-static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
-                        unsigned long uaddr, enum dma_data_direction direction,
-                        struct dma_attrs *attrs, bool rm)
+int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+               unsigned long uaddr, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        u64 proto_tce = iommu_direction_to_tce_perm(direction);
        __be64 *tcep, *tces;
         * of flags if that becomes the case
         */
        if (tbl->it_type & TCE_PCI_SWINV_CREATE)
-               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
+               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, false);
 
        return 0;
 }
 
-static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages,
-                           unsigned long uaddr,
-                           enum dma_data_direction direction,
-                           struct dma_attrs *attrs)
-{
-       return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs,
-                       false);
-}
-
-static void pnv_tce_free(struct iommu_table *tbl, long index, long npages,
-               bool rm)
+void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
 {
        __be64 *tcep, *tces;
 
                *(tcep++) = cpu_to_be64(0);
 
        if (tbl->it_type & TCE_PCI_SWINV_FREE)
-               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
-}
-
-static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages)
-{
-       pnv_tce_free(tbl, index, npages, false);
+               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, false);
 }
 
-static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
+unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
 {
        return ((u64 *)tbl->it_base)[index - tbl->it_offset];
 }
 
-static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages,
-                           unsigned long uaddr,
-                           enum dma_data_direction direction,
-                           struct dma_attrs *attrs)
-{
-       return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true);
-}
-
-static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages)
-{
-       pnv_tce_free(tbl, index, npages, true);
-}
-
 void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
                               void *tce_mem, u64 tce_size,
                               u64 dma_offset, unsigned page_shift)
        pci_devs_phb_init();
 
        /* Configure IOMMU DMA hooks */
-       ppc_md.tce_build = pnv_tce_build_vm;
-       ppc_md.tce_free = pnv_tce_free_vm;
-       ppc_md.tce_build_rm = pnv_tce_build_rm;
-       ppc_md.tce_free_rm = pnv_tce_free_rm;
-       ppc_md.tce_get = pnv_tce_get;
        set_pci_dma_ops(&dma_iommu_ops);
 }
 
 
 };
 
 extern struct pci_ops pnv_pci_ops;
+extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+               unsigned long uaddr, enum dma_data_direction direction,
+               struct dma_attrs *attrs);
+extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
+extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
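Note that the real-mode paths do not survive the conversion: pnv_tce_build() and pnv_tce_free() lose their rm parameter and now always call pnv_pci_ioda_tce_invalidate() with rm=false, the _vm/_rm wrapper pairs are deleted, and the tce_*_rm hooks vanish from ppc_md along with the virtual-mode ones. The extern prototypes above are what the three powernv ops tables point at.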
 
 void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
                                unsigned char *log_buff);
 
        int ret = 0;
        unsigned long flags;
 
-       if (npages == 1) {
+       if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
                return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                           direction, attrs);
        }
 {
        u64 rc;
 
+       if (!firmware_has_feature(FW_FEATURE_MULTITCE))
+               return tce_free_pSeriesLP(tbl, tcenum, npages);
+
        rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
 
        if (rc && printk_ratelimit()) {
        return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
 }
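With all LPAR tables sharing iommu_table_lpar_multi_ops, the MULTITCE decision moves from boot time into the callbacks themselves: tce_buildmulti_pSeriesLP and tce_freemulti_pSeriesLP now fall back to the single-TCE H_PUT_TCE paths at run time whenever FW_FEATURE_MULTITCE is absent. This is also what allows the "Disabling MULTITCE" path near the end of the patch to simply clear the feature bit instead of re-pointing ppc_md hooks.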
 
-
 #ifdef CONFIG_PCI
 static void iommu_table_setparms(struct pci_controller *phb,
                                 struct device_node *dn,
        tbl->it_size = size >> tbl->it_page_shift;
 }
 
+struct iommu_table_ops iommu_table_pseries_ops = {
+       .set = tce_build_pSeries,
+       .clear = tce_free_pSeries,
+       .get = tce_get_pseries
+};
+
 static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
 {
        struct device_node *dn;
                           pci->phb->node);
 
        iommu_table_setparms(pci->phb, dn, tbl);
+       tbl->it_ops = &iommu_table_pseries_ops;
        pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
        iommu_register_group(tbl, pci_domain_nr(bus), 0);
 
        pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
 }
 
+struct iommu_table_ops iommu_table_lpar_multi_ops = {
+       .set = tce_buildmulti_pSeriesLP,
+       .clear = tce_freemulti_pSeriesLP,
+       .get = tce_get_pSeriesLP
+};
 
 static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
 {
                tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   ppci->phb->node);
                iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
+               tbl->it_ops = &iommu_table_lpar_multi_ops;
                ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
                iommu_register_group(tbl, pci_domain_nr(bus), 0);
                pr_debug("  created table: %p\n", ppci->iommu_table);
                tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   phb->node);
                iommu_table_setparms(phb, dn, tbl);
+               tbl->it_ops = &iommu_table_pseries_ops;
                PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
                iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
                set_iommu_table_base(&dev->dev, tbl);
                tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   pci->phb->node);
                iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
+               tbl->it_ops = &iommu_table_lpar_multi_ops;
                pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
                iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
                pr_debug("  created table: %p\n", pci->iommu_table);
                return;
 
        if (firmware_has_feature(FW_FEATURE_LPAR)) {
-               if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
-                       ppc_md.tce_build = tce_buildmulti_pSeriesLP;
-                       ppc_md.tce_free  = tce_freemulti_pSeriesLP;
-               } else {
-                       ppc_md.tce_build = tce_build_pSeriesLP;
-                       ppc_md.tce_free  = tce_free_pSeriesLP;
-               }
-               ppc_md.tce_get   = tce_get_pSeriesLP;
                pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
                pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
                ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
                ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
        } else {
-               ppc_md.tce_build = tce_build_pSeries;
-               ppc_md.tce_free  = tce_free_pSeries;
-               ppc_md.tce_get   = tce_get_pseries;
                pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
                pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
        }
            firmware_has_feature(FW_FEATURE_LPAR) &&
            firmware_has_feature(FW_FEATURE_MULTITCE)) {
                printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
-               ppc_md.tce_build = tce_build_pSeriesLP;
-               ppc_md.tce_free  = tce_free_pSeriesLP;
                powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
        }
        return 1;
 
        return 0;
 }
 
+static struct iommu_table_ops iommu_dart_ops = {
+       .set = dart_build,
+       .clear = dart_free,
+       .flush = dart_flush,
+};
+
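DART is the only backend in this patch that installs a flush callback; like cell, it leaves get NULL, so the CONFIG_CRASH_DUMP reservation scan in iommu_table_clear() is skipped on these platforms, exactly as before, when ppc_md.tce_get was simply never assigned there.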
 static void iommu_table_dart_setup(void)
 {
        iommu_table_dart.it_busno = 0;
        iommu_table_dart.it_base = (unsigned long)dart_vbase;
        iommu_table_dart.it_index = 0;
        iommu_table_dart.it_blocksize = 1;
+       iommu_table_dart.it_ops = &iommu_dart_ops;
        iommu_init_table(&iommu_table_dart, -1);
 
        /* Reserve the last page of the DART to avoid possible prefetch
        if (dart_init(dn) != 0)
                goto bail;
 
-       /* Setup low level TCE operations for the core IOMMU code */
-       ppc_md.tce_build = dart_build;
-       ppc_md.tce_free  = dart_free;
-       ppc_md.tce_flush = dart_flush;
-
        /* Setup bypass if supported */
        if (dart_is_u4)
                ppc_md.dma_set_mask = dart_dma_set_mask;