void zpci_destroy_iommu(struct zpci_dev *zdev);
 
 #ifdef CONFIG_PCI
+/*
+ * Whether this PCI function should be accessed via MIO instructions:
+ * requires both the facility to be present (have_mio static key) and
+ * the individual device to report MIO capability.
+ */
+static inline bool zpci_use_mio(struct zpci_dev *zdev)
+{
+       return static_branch_likely(&have_mio) && zdev->mio_capable;
+}
+
 /* Error handling and recovery */
 void zpci_event_error(void *);
 void zpci_event_availability(void *);
 
                if (!len)
                        continue;
 
-               if (static_branch_likely(&have_mio))
+               if (zpci_use_mio(zdev))
                        pdev->resource[i].start =
                                (resource_size_t __force) zdev->bars[i].mio_wb;
                else
-                       pdev->resource[i].start =
-                               (resource_size_t __force) pci_iomap(pdev, i, 0);
+                       pdev->resource[i].start = (resource_size_t __force)
+                               pci_iomap_range_fh(pdev, i, 0, 0);
                pdev->resource[i].end = pdev->resource[i].start + len - 1;
        }
 
 
 static void zpci_unmap_resources(struct pci_dev *pdev)
 {
+       struct zpci_dev *zdev = to_zpci(pdev);
        resource_size_t len;
        int i;
 
-       if (static_branch_likely(&have_mio))
+       /*
+        * In the MIO case the BAR resources were taken straight from
+        * zdev->bars[i].mio_wb and never ioremapped, so there is
+        * nothing to unmap here.
+        */
+       if (zpci_use_mio(zdev))
                return;
 
        for (i = 0; i < PCI_BAR_COUNT; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
-               pci_iounmap(pdev, (void __iomem __force *)
-                           pdev->resource[i].start);
+               /* Pairs with the pci_iomap_range_fh() done at map time. */
+               pci_iounmap_fh(pdev, (void __iomem __force *)
+                              pdev->resource[i].start);
        }
 }
 
                if (zdev->bars[i].val & 4)
                        flags |= IORESOURCE_MEM_64;
 
-               if (static_branch_likely(&have_mio))
+               if (zpci_use_mio(zdev))
                        addr = (unsigned long) zdev->bars[i].mio_wb;
                else
                        addr = ZPCI_ADDR(entry);