                kfree(s390_domain);
                return NULL;
        }
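+       /* The aperture covers the full IOVA range translatable by the DMA table */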
+       s390_domain->domain.geometry.force_aperture = true;
+       s390_domain->domain.geometry.aperture_start = 0;
+       s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;
 
        spin_lock_init(&s390_domain->dma_table_lock);
        spin_lock_init(&s390_domain->list_lock);
        struct s390_domain *s390_domain = to_s390_domain(domain);
        struct zpci_dev *zdev = to_zpci_dev(dev);
        unsigned long flags;
-       int cc, rc = 0;
+       int cc;
 
        if (!zdev)
                return -ENODEV;
 
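+       /* The device's DMA range must overlap the domain's fixed aperture */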
+       if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
+               domain->geometry.aperture_end < zdev->start_dma))
+               return -EINVAL;
+
        if (zdev->s390_domain)
                __s390_iommu_detach_device(zdev);
        else if (zdev->dma_table)
                return -EIO;
        zdev->dma_table = s390_domain->dma_table;
-
-       spin_lock_irqsave(&s390_domain->list_lock, flags);
-       /* First device defines the DMA range limits */
-       if (list_empty(&s390_domain->devices)) {
-               domain->geometry.aperture_start = zdev->start_dma;
-               domain->geometry.aperture_end = zdev->end_dma;
-               domain->geometry.force_aperture = true;
-       /* Allow only devices with identical DMA range limits */
-       } else if (domain->geometry.aperture_start != zdev->start_dma ||
-                  domain->geometry.aperture_end != zdev->end_dma) {
-               spin_unlock_irqrestore(&s390_domain->list_lock, flags);
-               rc = -EINVAL;
-               goto out_unregister;
-       }
        zdev->s390_domain = s390_domain;
+
+       spin_lock_irqsave(&s390_domain->list_lock, flags);
        list_add(&zdev->iommu_list, &s390_domain->devices);
        spin_unlock_irqrestore(&s390_domain->list_lock, flags);
 
        return 0;
-
-out_unregister:
-       zpci_unregister_ioat(zdev, 0);
-       zdev->dma_table = NULL;
-
-       return rc;
 }
 
 static void s390_iommu_detach_device(struct iommu_domain *domain,
        zpci_dma_init_device(zdev);
 }
 
+static void s390_iommu_get_resv_regions(struct device *dev,
+                                       struct list_head *list)
+{
+       struct zpci_dev *zdev = to_zpci_dev(dev);
+       struct iommu_resv_region *region;
+
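+       /* IOVAs below the device's start_dma are unusable; mark them reserved */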
+       if (zdev->start_dma) {
+               region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
+                                                IOMMU_RESV_RESERVED, GFP_KERNEL);
+               if (!region)
+                       return;
+               list_add_tail(&region->list, list);
+       }
+
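+       /* Likewise reserve everything above end_dma up to the table limit */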
+       if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
+               region = iommu_alloc_resv_region(zdev->end_dma + 1,
+                                                ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
+                                                0, IOMMU_RESV_RESERVED, GFP_KERNEL);
+               if (!region)
+                       return;
+               list_add_tail(&region->list, list);
+       }
+}
+
 static struct iommu_device *s390_iommu_probe_device(struct device *dev)
 {
        struct zpci_dev *zdev;
 
        zdev = to_zpci_dev(dev);
 
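+       /* Reject a malformed DMA range or one starting beyond the table limit */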
+       if (zdev->start_dma > zdev->end_dma ||
+           zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
+               return ERR_PTR(-EINVAL);
+
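+       /* Clamp end_dma to the highest IOVA the DMA table can translate */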
+       if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
+               zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;
+
        return &zdev->iommu_dev;
 }
 
        .release_device = s390_iommu_release_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = S390_IOMMU_PGSIZES,
+       .get_resv_regions = s390_iommu_get_resv_regions,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = s390_iommu_attach_device,
                .detach_dev     = s390_iommu_detach_device,