 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct device *dev);
 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
-static void domain_context_clear(struct intel_iommu *iommu,
-                                struct device *dev);
-static int domain_detach_iommu(struct dmar_domain *domain,
-                              struct intel_iommu *iommu);
-static bool device_is_rmrr_locked(struct device *dev);
 static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev);
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
        return agaw;
 }
 
-static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
-                      int guest_width)
-{
-       int adjust_width, agaw;
-       unsigned long sagaw;
-       int ret;
-
-       init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-
-       if (!intel_iommu_strict) {
-               ret = init_iova_flush_queue(&domain->iovad,
-                                           iommu_flush_iova, iova_entry_free);
-               if (ret)
-                       pr_info("iova flush queue initialization failed\n");
-       }
-
-       domain_reserve_special_ranges(domain);
-
-       /* calculate AGAW */
-       if (guest_width > cap_mgaw(iommu->cap))
-               guest_width = cap_mgaw(iommu->cap);
-       domain->gaw = guest_width;
-       adjust_width = guestwidth_to_adjustwidth(guest_width);
-       agaw = width_to_agaw(adjust_width);
-       sagaw = cap_sagaw(iommu->cap);
-       if (!test_bit(agaw, &sagaw)) {
-               /* hardware doesn't support it, choose a bigger one */
-               pr_debug("Hardware doesn't support agaw %d\n", agaw);
-               agaw = find_next_bit(&sagaw, 5, agaw);
-               if (agaw >= 5)
-                       return -ENODEV;
-       }
-       domain->agaw = agaw;
-
-       if (ecap_coherent(iommu->ecap))
-               domain->iommu_coherency = 1;
-       else
-               domain->iommu_coherency = 0;
-
-       if (ecap_sc_support(iommu->ecap))
-               domain->iommu_snooping = 1;
-       else
-               domain->iommu_snooping = 0;
-
-       if (intel_iommu_superpage)
-               domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
-       else
-               domain->iommu_superpage = 0;
-
-       domain->nid = iommu->node;
-
-       /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-       if (!domain->pgd)
-               return -ENOMEM;
-       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
-       return 0;
-}
-
 static void domain_exit(struct dmar_domain *domain)
 {
 
        return domain;
 }
 
-static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
-{
-       *(u16 *)opaque = alias;
-       return 0;
-}
-
-static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
-{
-       struct device_domain_info *info;
-       struct dmar_domain *domain = NULL;
-       struct intel_iommu *iommu;
-       u16 dma_alias;
-       unsigned long flags;
-       u8 bus, devfn;
-
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
-               return NULL;
-
-       if (dev_is_pci(dev)) {
-               struct pci_dev *pdev = to_pci_dev(dev);
-
-               pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
-               spin_lock_irqsave(&device_domain_lock, flags);
-               info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
-                                                     PCI_BUS_NUM(dma_alias),
-                                                     dma_alias & 0xff);
-               if (info) {
-                       iommu = info->iommu;
-                       domain = info->domain;
-               }
-               spin_unlock_irqrestore(&device_domain_lock, flags);
-
-               /* DMA alias already has a domain, use it */
-               if (info)
-                       goto out;
-       }
-
-       /* Allocate and initialize new domain for the device */
-       domain = alloc_domain(0);
-       if (!domain)
-               return NULL;
-       if (domain_init(domain, iommu, gaw)) {
-               domain_exit(domain);
-               return NULL;
-       }
-
-out:
-       return domain;
-}
-
-static struct dmar_domain *set_domain_for_dev(struct device *dev,
-                                             struct dmar_domain *domain)
-{
-       struct intel_iommu *iommu;
-       struct dmar_domain *tmp;
-       u16 req_id, dma_alias;
-       u8 bus, devfn;
-
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
-               return NULL;
-
-       req_id = ((u16)bus << 8) | devfn;
-
-       if (dev_is_pci(dev)) {
-               struct pci_dev *pdev = to_pci_dev(dev);
-
-               pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
-               /* register PCI DMA alias device */
-               if (req_id != dma_alias) {
-                       tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
-                                       dma_alias & 0xff, NULL, domain);
-
-                       if (!tmp || tmp != domain)
-                               return tmp;
-               }
-       }
-
-       tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
-       if (!tmp || tmp != domain)
-               return tmp;
-
-       return domain;
-}
-
 static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long long start,
                                     unsigned long long end)
                                DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
-static int domain_prepare_identity_map(struct device *dev,
-                                      struct dmar_domain *domain,
-                                      unsigned long long start,
-                                      unsigned long long end)
-{
-       /* For _hardware_ passthrough, don't bother. But for software
-          passthrough, we do it anyway -- it may indicate a memory
-          range which is reserved in E820, so which didn't get set
-          up to start with in si_domain */
-       if (domain == si_domain && hw_pass_through) {
-               dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
-                        start, end);
-               return 0;
-       }
-
-       dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
-
-       if (end < start) {
-               WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
-                       "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-                       dmi_get_system_info(DMI_BIOS_VENDOR),
-                       dmi_get_system_info(DMI_BIOS_VERSION),
-                       dmi_get_system_info(DMI_PRODUCT_VERSION));
-               return -EIO;
-       }
-
-       if (end >> agaw_to_width(domain->agaw)) {
-               WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
-                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-                    agaw_to_width(domain->agaw),
-                    dmi_get_system_info(DMI_BIOS_VENDOR),
-                    dmi_get_system_info(DMI_BIOS_VERSION),
-                    dmi_get_system_info(DMI_PRODUCT_VERSION));
-               return -EIO;
-       }
-
-       return iommu_domain_identity_map(domain, start, end);
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_init(int hw)
        return iova_pfn;
 }
 
-static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
-{
-       struct dmar_domain *domain, *tmp;
-       struct dmar_rmrr_unit *rmrr;
-       struct device *i_dev;
-       int i, ret;
-
-       /* Device shouldn't be attached by any domains. */
-       domain = find_domain(dev);
-       if (domain)
-               return NULL;
-
-       domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-       if (!domain)
-               goto out;
-
-       /* We have a new domain - setup possible RMRRs for the device */
-       rcu_read_lock();
-       for_each_rmrr_units(rmrr) {
-               for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
-                                         i, i_dev) {
-                       if (i_dev != dev)
-                               continue;
-
-                       ret = domain_prepare_identity_map(dev, domain,
-                                                         rmrr->base_address,
-                                                         rmrr->end_address);
-                       if (ret)
-                               dev_err(dev, "Mapping reserved region failed\n");
-               }
-       }
-       rcu_read_unlock();
-
-       tmp = set_domain_for_dev(dev, domain);
-       if (!tmp || domain != tmp) {
-               domain_exit(domain);
-               domain = tmp;
-       }
-
-out:
-       if (!domain)
-               dev_err(dev, "Allocating domain failed\n");
-       else
-               domain->domain.type = IOMMU_DOMAIN_DMA;
-
-       return domain;
-}
-
 /* Check if the dev needs to go through non-identity map and unmap process.*/
 static bool iommu_need_mapping(struct device *dev)
 {
-       int ret;
-
        if (iommu_dummy(dev))
                return false;
 
        if (unlikely(attach_deferred(dev)))
                do_deferred_attach(dev);
 
-       ret = identity_mapping(dev);
-       if (ret) {
-               u64 dma_mask = *dev->dma_mask;
-
-               if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
-                       dma_mask = dev->coherent_dma_mask;
-
-               if (dma_mask >= dma_direct_get_required_mask(dev))
-                       return false;
-
-               /*
-                * 32 bit DMA is removed from si_domain and fall back to
-                * non-identity mapping.
-                */
-               dmar_remove_one_dev_info(dev);
-               ret = iommu_request_dma_domain_for_dev(dev);
-               if (ret) {
-                       struct iommu_domain *domain;
-                       struct dmar_domain *dmar_domain;
-
-                       domain = iommu_get_domain_for_dev(dev);
-                       if (domain) {
-                               dmar_domain = to_dmar_domain(domain);
-                               dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
-                       }
-                       dmar_remove_one_dev_info(dev);
-                       get_private_domain_for_dev(dev);
-               }
-
-               dev_info(dev, "32bit DMA uses non-identity mapping\n");
-       }
-
-       return true;
+       return !identity_mapping(dev);
 }
 
 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
        }
        up_write(&dmar_global_lock);
 
-#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-       /*
-        * If the system has no untrusted device or the user has decided
-        * to disable the bounce page mechanisms, we don't need swiotlb.
-        * Mark this and the pre-allocated bounce pages will be released
-        * later.
-        */
-       if (!has_untrusted_dev() || intel_no_bounce)
-               swiotlb = 0;
-#endif
        dma_ops = &intel_dma_ops;
 
        init_iommu_pm_ops();
        domain_detach_iommu(domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
 
-       /* free the private domain */
-       if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
-           !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
-           list_empty(&domain->devices))
-               domain_exit(info->domain);
-
        free_devinfo_mem(info);
 }