long index,
                        unsigned long *hpa,
                        enum dma_data_direction *direction);
+       /* Real mode */
+       int (*exchange_rm)(struct iommu_table *tbl,
+                       long index,
+                       unsigned long *hpa,
+                       enum dma_data_direction *direction);
 #endif
        void (*clear)(struct iommu_table *tbl,
                        long index, long npages);
 extern int __init tce_iommu_bus_notifier_init(void);
 extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
                unsigned long *hpa, enum dma_data_direction *direction);
+extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
+               unsigned long *hpa, enum dma_data_direction *direction);
 #else
 static inline void iommu_register_group(struct iommu_table_group *table_group,
                                        int pci_domain_number,
 
 }
 EXPORT_SYMBOL_GPL(iommu_tce_xchg);
 
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * Real mode counterpart of iommu_tce_xchg(): swaps the TCE at @entry via the
+ * table's exchange_rm() callback. On success, if the new mapping is
+ * device-writable (DMA_FROM_DEVICE or DMA_BIDIRECTIONAL), the backing page is
+ * marked dirty. Uses realmode_pfn_to_page() since the linear pfn_to_page()
+ * path is not usable in real mode.
+ *
+ * Returns 0 on success, -EFAULT if no struct page is reachable in real mode
+ * (in which case the exchange is undone), or the callback's error code.
+ */
+long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
+               unsigned long *hpa, enum dma_data_direction *direction)
+{
+       long ret;
+
+       ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+       /* Pages the device may have written to must be marked dirty */
+       if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+                       (*direction == DMA_BIDIRECTIONAL))) {
+               struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
+
+               if (likely(pg)) {
+                       SetPageDirty(pg);
+               } else {
+                       /*
+                        * Cannot reach the struct page in real mode: undo the
+                        * exchange (*hpa/*direction now hold the previous
+                        * values, so this restores the original TCE) and fail.
+                        */
+                       tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+                       ret = -EFAULT;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
+#endif
+
 int iommu_take_ownership(struct iommu_table *tbl)
 {
        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
 
 
        return ret;
 }
+
+/*
+ * Real mode exchange_rm() hook for IODA1/P7IOC: exchange the TCE, then
+ * invalidate the hardware TCE cache. The final 'true' argument presumably
+ * selects the real-mode invalidation path — confirm against
+ * pnv_pci_p7ioc_tce_invalidate()'s signature.
+ */
+static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index,
+               unsigned long *hpa, enum dma_data_direction *direction)
+{
+       long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+
+       /* Only invalidate if the exchange actually took effect */
+       if (!ret)
+               pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true);
+
+       return ret;
+}
 #endif
 
 static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
        .set = pnv_ioda1_tce_build,
 #ifdef CONFIG_IOMMU_API
        .exchange = pnv_ioda1_tce_xchg,
+       .exchange_rm = pnv_ioda1_tce_xchg_rm,
 #endif
        .clear = pnv_ioda1_tce_free,
        .get = pnv_tce_get,
 {
        struct iommu_table_group_link *tgl;
 
-       list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+       list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
                struct pnv_ioda_pe *pe = container_of(tgl->table_group,
                                struct pnv_ioda_pe, table_group);
                struct pnv_phb *phb = pe->phb;
 
        return ret;
 }
+
+/*
+ * Real mode exchange_rm() hook for IODA2: exchange the TCE, then invalidate
+ * the hardware TCE cache. The final 'true' argument presumably selects the
+ * real-mode invalidation path — confirm against
+ * pnv_pci_ioda2_tce_invalidate()'s signature.
+ */
+static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index,
+               unsigned long *hpa, enum dma_data_direction *direction)
+{
+       long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+
+       /* Only invalidate if the exchange actually took effect */
+       if (!ret)
+               pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true);
+
+       return ret;
+}
 #endif
 
 static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
        .set = pnv_ioda2_tce_build,
 #ifdef CONFIG_IOMMU_API
        .exchange = pnv_ioda2_tce_xchg,
+       .exchange_rm = pnv_ioda2_tce_xchg_rm,
 #endif
        .clear = pnv_ioda2_tce_free,
        .get = pnv_tce_get,