#endif
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-                                     unsigned long pfn, int *target_level)
+                                     unsigned long pfn, int *target_level,
+                                     gfp_t gfp)
 {
        struct dma_pte *parent, *pte;
        int level = agaw_to_level(domain->agaw);
                if (!dma_pte_present(pte)) {
                        uint64_t pteval;
 
-                       tmp_page = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
+                       tmp_page = alloc_pgtable_page(domain->nid, gfp);
 
                        if (!tmp_page)
                                return NULL;
 
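A minimal caller-side sketch of the new parameter (my illustration, not part of the patch): walk_or_build() and its can_sleep flag are hypothetical. The point is that the allocation context for missing page-table levels is now chosen by the caller instead of being hard-coded to GFP_ATOMIC inside pfn_to_dma_pte():

	/* Hypothetical caller, for illustration only: the gfp it passes is
	 * what alloc_pgtable_page() will use for any missing table levels. */
	static struct dma_pte *walk_or_build(struct dmar_domain *domain,
					     unsigned long pfn, bool can_sleep)
	{
		int level = 1;	/* build the tree down to 4K PTEs */

		/* Sleepable paths may now ask for GFP_KERNEL; atomic paths keep
		 * GFP_ATOMIC, matching the old hard-coded behaviour. */
		return pfn_to_dma_pte(domain, pfn, &level,
				      can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	}
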
        while (start_pfn <= end_pfn) {
                if (!pte)
-                       pte = pfn_to_dma_pte(domain, start_pfn, &level);
+                       pte = pfn_to_dma_pte(domain, start_pfn, &level,
+                                            GFP_ATOMIC);
 
                if (dma_pte_present(pte)) {
                        dma_pte_free_pagetable(domain, start_pfn,
 
 static int
 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-                unsigned long phys_pfn, unsigned long nr_pages, int prot)
+                unsigned long phys_pfn, unsigned long nr_pages, int prot,
+                gfp_t gfp)
 {
        struct dma_pte *first_pte = NULL, *pte = NULL;
        unsigned int largepage_lvl = 0;
                        largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
                                        phys_pfn, nr_pages);
 
-                       pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
+                       pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl,
+                                            gfp);
                        if (!pte)
                                return -ENOMEM;
                        first_pte = pte;
 
        return __domain_mapping(domain, first_vpfn,
                                first_vpfn, last_vpfn - first_vpfn + 1,
-                               DMA_PTE_READ|DMA_PTE_WRITE);
+                               DMA_PTE_READ|DMA_PTE_WRITE, GFP_ATOMIC);
 }
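
This internal identity-map caller is converted with GFP_ATOMIC, so its behaviour is unchanged from the flag pfn_to_dma_pte() used to hard-code. Purely as a hedged sketch, a caller that is known to run only in process context could instead relax the allocation; whether this particular path may sleep is not established by the hunk itself:

	/* Illustration only: same call shape, relaxed allocation context.
	 * Only valid for a caller that never runs atomically. */
	return __domain_mapping(domain, first_vpfn,
				first_vpfn, last_vpfn - first_vpfn + 1,
				DMA_PTE_READ | DMA_PTE_WRITE, GFP_KERNEL);
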
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
           the low bits of hpa would take us onto the next page */
        size = aligned_nrpages(hpa, size);
        return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
-                               hpa >> VTD_PAGE_SHIFT, size, prot);
+                               hpa >> VTD_PAGE_SHIFT, size, prot, gfp);
 }
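
With intel_iommu_map() now forwarding its gfp argument, the allocation context is driven by whoever calls the core API. A usage sketch, assuming the iommu_map() variant that takes a gfp_t (added elsewhere in this series); the function name and flag choices below are illustrative, not taken from the patch:

	#include <linux/iommu.h>

	int example_map_region(struct iommu_domain *domain, unsigned long iova,
			       phys_addr_t paddr, size_t size)
	{
		/* Process context: IOVA page-table pages can now be allocated
		 * with GFP_KERNEL rather than the formerly implicit GFP_ATOMIC. */
		return iommu_map(domain, iova, paddr, size,
				 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	}
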
 
 static int intel_iommu_map_pages(struct iommu_domain *domain,
 
        /* Cope with horrid API which requires us to unmap more than the
           size argument if it happens to be a large-page mapping. */
-       BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
+       BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
+                              GFP_ATOMIC));
 
        if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                size = VTD_PAGE_SIZE << level_to_offset_bits(level);
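
The unmap path above and the iova_to_phys path below call pfn_to_dma_pte() purely as a lookup: they start with level == 0, and on my reading of the walker that makes it stop at the first non-present or superpage entry rather than allocate, so the GFP_ATOMIC they pass is never actually consumed. A condensed sketch of that convention (illustrative, restating the calls in the surrounding hunks):

	/* *target_level == 0 means "walk, don't build": no
	 * alloc_pgtable_page() call can happen on this path, so the gfp
	 * argument is effectively a don't-care here. */
	int level = 0;
	struct dma_pte *pte = pfn_to_dma_pte(dmar_domain,
					     iova >> VTD_PAGE_SHIFT, &level,
					     GFP_ATOMIC /* unused on lookup */);
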
        int level = 0;
        u64 phys = 0;
 
-       pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
+       pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
+                            GFP_ATOMIC);
        if (pte && dma_pte_present(pte))
                phys = dma_pte_addr(pte) +
                        (iova & (BIT_MASK(level_to_offset_bits(level) +