know the hardware page-walk will no longer touch them.
    The 'pte' argument is the *parent* PTE, pointing to the page that is to
    be freed. */
-static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
-                                           int level, struct dma_pte *pte,
-                                           struct page *freelist)
+static void dma_pte_list_pagetables(struct dmar_domain *domain,
+                                   int level, struct dma_pte *pte,
+                                   struct list_head *freelist)
 {
        struct page *pg;
 
        pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
-       pg->freelist = freelist;
-       freelist = pg;
+       list_add_tail(&pg->lru, freelist);
 
        if (level == 1)
-               return freelist;
+               return;
 
        pte = page_address(pg);
        do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
-                       freelist = dma_pte_list_pagetables(domain, level - 1,
-                                                          pte, freelist);
+                       dma_pte_list_pagetables(domain, level - 1, pte, freelist);
                pte++;
        } while (!first_pte_in_page(pte));
-
-       return freelist;
 }
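(Aside, illustration only: with this conversion, each pagetable page is threaded onto the caller-supplied list through its page->lru node instead of being chained through the old page->freelist pointer. The hypothetical helper below sketches what draining such a list amounts to for these order-0 table pages; the patch itself leaves that work to put_pages_list(), which drops the final reference on each page in bulk.)

/* Hypothetical helper, not part of the patch; uses only <linux/list.h>
 * and <linux/mm.h> helpers. */
static void drain_pgtable_freelist(struct list_head *freelist)
{
        struct page *pg, *next;

        list_for_each_entry_safe(pg, next, freelist, lru) {
                list_del(&pg->lru);     /* unlink before freeing */
                __free_page(pg);        /* each entry is one order-0 table page */
        }
}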
 
-static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
-                                       struct dma_pte *pte, unsigned long pfn,
-                                       unsigned long start_pfn,
-                                       unsigned long last_pfn,
-                                       struct page *freelist)
+static void dma_pte_clear_level(struct dmar_domain *domain, int level,
+                               struct dma_pte *pte, unsigned long pfn,
+                               unsigned long start_pfn, unsigned long last_pfn,
+                               struct list_head *freelist)
 {
        struct dma_pte *first_pte = NULL, *last_pte = NULL;
 
                        /* These subordinate page tables are going away entirely. Don't
                           bother to clear them; we're just going to *free* them. */
                        if (level > 1 && !dma_pte_superpage(pte))
-                               freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
+                               dma_pte_list_pagetables(domain, level - 1, pte, freelist);
 
                        dma_clear_pte(pte);
                        if (!first_pte)
                                first_pte = pte;
                        last_pte = pte;
                } else if (level > 1) {
                        /* Recurse down into a level that isn't *entirely* obsolete */
-                       freelist = dma_pte_clear_level(domain, level - 1,
-                                                      phys_to_virt(dma_pte_addr(pte)),
-                                                      level_pfn, start_pfn, last_pfn,
-                                                      freelist);
+                       dma_pte_clear_level(domain, level - 1,
+                                           phys_to_virt(dma_pte_addr(pte)),
+                                           level_pfn, start_pfn, last_pfn,
+                                           freelist);
                }
 next:
                pfn = level_pfn + level_size(level);
        if (first_pte)
                domain_flush_cache(domain, first_pte,
                                   (void *)++last_pte - (void *)first_pte);
-
-       return freelist;
 }
 
 /* We can't just free the pages because the IOMMU may still be walking
    the page tables, and may have cached the intermediate levels. The
    pages can only be freed after the IOTLB flush has been done. */
-static struct page *domain_unmap(struct dmar_domain *domain,
-                                unsigned long start_pfn,
-                                unsigned long last_pfn,
-                                struct page *freelist)
+static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
+                        unsigned long last_pfn, struct list_head *freelist)
 {
        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);
 
        /* we don't need lock here; nobody else touches the iova range */
-       freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
-                                      domain->pgd, 0, start_pfn, last_pfn,
-                                      freelist);
+       dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
+                           domain->pgd, 0, start_pfn, last_pfn, freelist);
 
        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                struct page *pgd_page = virt_to_page(domain->pgd);
-               pgd_page->freelist = freelist;
-               freelist = pgd_page;
-
+               list_add_tail(&pgd_page->lru, freelist);
                domain->pgd = NULL;
        }
-
-       return freelist;
-}
-
-static void dma_free_pagelist(struct page *freelist)
-{
-       struct page *pg;
-
-       while ((pg = freelist)) {
-               freelist = pg->freelist;
-               free_pgtable_page(page_address(pg));
-       }
 }
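(Aside: a minimal sketch of the calling pattern the comment above domain_unmap() describes, using a hypothetical example_unmap_range() and eliding the actual IOTLB flush. Pages are gathered first, the flush makes them unreachable to the hardware page-walk, and only then are they returned to the allocator.)

static void example_unmap_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        LIST_HEAD(freelist);            /* empty list head on the stack */

        /* 1. Clear the PTEs and collect the now-unlinked table pages. */
        domain_unmap(domain, start_pfn, last_pfn, &freelist);

        /* 2. Flush the IOTLB so cached walks can no longer reach the
         *    collected pages (flush call omitted in this sketch).     */

        /* 3. Only now is it safe to free everything in one go. */
        put_pages_list(&freelist);
}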
 
 /* iommu handling */
        domain_remove_dev_info(domain);
 
        if (domain->pgd) {
-               struct page *freelist;
+               LIST_HEAD(freelist);
 
-               freelist = domain_unmap(domain, 0,
-                                       DOMAIN_MAX_PFN(domain->gaw), NULL);
-               dma_free_pagelist(freelist);
+               domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
+               put_pages_list(&freelist);
        }
 
        free_domain_mem(domain);
                {
                        struct dmar_drhd_unit *drhd;
                        struct intel_iommu *iommu;
-                       struct page *freelist;
+                       LIST_HEAD(freelist);
 
-                       freelist = domain_unmap(si_domain,
-                                               start_vpfn, last_vpfn,
-                                               NULL);
+                       domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
 
                        rcu_read_lock();
                        for_each_active_iommu(iommu, drhd)
                                iommu_flush_iotlb_psi(iommu, si_domain,
                                        start_vpfn, mhp->nr_pages,
-                                       !freelist, 0);
+                                       list_empty(&freelist), 0);
                        rcu_read_unlock();
-                       dma_free_pagelist(freelist);
+                       put_pages_list(&freelist);
                }
                break;
        }
        start_pfn = iova >> VTD_PAGE_SHIFT;
        last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
 
-       gather->freelist = domain_unmap(dmar_domain, start_pfn,
-                                       last_pfn, gather->freelist);
+       domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist);
 
        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;
 
        for_each_domain_iommu(iommu_id, dmar_domain)
                iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
-                                     start_pfn, nrpages, !gather->freelist, 0);
+                                     start_pfn, nrpages,
+                                     list_empty(&gather->freelist), 0);
 
-       dma_free_pagelist(gather->freelist);
+       put_pages_list(&gather->freelist);
 }
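(Aside: the unmap/sync paths above park pages on gather->freelist, which assumes the core struct iommu_iotlb_gather carries a struct list_head freelist that is a valid empty list before the driver callbacks run. Roughly, as a sketch with a hypothetical name:)

static inline void example_gather_init(struct iommu_iotlb_gather *gather)
{
        /* other gather fields elided in this sketch */
        INIT_LIST_HEAD(&gather->freelist);      /* valid empty list before use */
}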
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 
 int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
 {
        struct iova_fq __percpu *queue;
-       int cpu;
+       int i, cpu;
 
        atomic64_set(&iovad->fq_flush_start_cnt,  0);
        atomic64_set(&iovad->fq_flush_finish_cnt, 0);
                fq->tail = 0;
 
                spin_lock_init(&fq->lock);
+
+               for (i = 0; i < IOVA_FQ_SIZE; i++)
+                       INIT_LIST_HEAD(&fq->entries[i].freelist);
        }
 
        iovad->fq_domain = fq_domain;
 }
 EXPORT_SYMBOL_GPL(free_iova_fast);
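(Aside: every flush-queue slot must be a valid, possibly empty, list at all times, because fq_ring_free() and fq_destroy_all_entries() below hand slots straight to put_pages_list() whether or not any pages were ever queued on them; that is why init_iova_flush_queue() above initialises every entry. The sketch below, with a hypothetical helper name and assuming the slot type is the existing struct iova_fq_entry, shows why that is safe: splicing from an empty source list and putting an empty list are both no-ops.)

static void example_slot_lifecycle(struct iova_fq_entry *entry,
                                   struct list_head *incoming)
{
        INIT_LIST_HEAD(&entry->freelist);        /* slot starts out empty        */
        list_splice(incoming, &entry->freelist); /* no-op if 'incoming' is empty */
        put_pages_list(&entry->freelist);        /* no-op on an empty list       */
}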
 
-static void fq_entry_dtor(struct page *freelist)
-{
-       while (freelist) {
-               unsigned long p = (unsigned long)page_address(freelist);
-
-               freelist = freelist->freelist;
-               free_page(p);
-       }
-}
-
 #define fq_ring_for_each(i, fq) \
        for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
 
                if (fq->entries[idx].counter >= counter)
                        break;
 
-               fq_entry_dtor(fq->entries[idx].freelist);
+               put_pages_list(&fq->entries[idx].freelist);
                free_iova_fast(iovad,
                               fq->entries[idx].iova_pfn,
                               fq->entries[idx].pages);
 
        /*
        * This code runs when the iova_domain is being destroyed, so don't
-        * bother to free iovas, just call the entry_dtor on all remaining
-        * entries.
+        * bother to free iovas, just free any remaining pagetable pages.
         */
        for_each_possible_cpu(cpu) {
                struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
                int idx;
 
                fq_ring_for_each(idx, fq)
-                       fq_entry_dtor(fq->entries[idx].freelist);
+                       put_pages_list(&fq->entries[idx].freelist);
        }
 }
 
 
 void queue_iova(struct iova_domain *iovad,
                unsigned long pfn, unsigned long pages,
-               struct page *freelist)
+               struct list_head *freelist)
 {
        struct iova_fq *fq;
        unsigned long flags;
 
        fq->entries[idx].iova_pfn = pfn;
        fq->entries[idx].pages    = pages;
-       fq->entries[idx].freelist = freelist;
        fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
+       list_splice(freelist, &fq->entries[idx].freelist);
 
        spin_unlock_irqrestore(&fq->lock, flags);
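(Aside: list_splice() moves the caller's pages onto the ring slot but does not re-initialise the source list head, which is fine here because the caller's freelist is not touched again after queue_iova() returns. If the source list did need to stay usable, list_splice_init() would be the variant to reach for, as in this hypothetical hand-off sketch:)

static void example_hand_off(struct list_head *src, struct list_head *dst)
{
        /* Move all entries from 'src' onto 'dst' and leave 'src' as a
         * valid empty list, so the caller may keep using it. */
        list_splice_init(src, dst);
}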