unsigned long isolation_error_count = 0, i;
        struct folio *prev_folio = NULL;
        LIST_HEAD(movable_page_list);
-       bool drain_allow = true;
+       bool drain_allow = true, coherent_pages = false;
        int ret = 0;
 
        for (i = 0; i < nr_pages; i++) {
                        continue;
                prev_folio = folio;
 
-               if (folio_is_longterm_pinnable(folio))
+               /*
+                * Device coherent pages are managed by a driver and should not
+                * be pinned indefinitely as it prevents the driver moving the
+                * page. So when trying to pin with FOLL_LONGTERM instead try
+                * to migrate the page out of device memory.
+                */
+               if (folio_is_device_coherent(folio)) {
+                       /*
+                        * We always want a new GUP lookup with device coherent
+                        * pages.
+                        */
+                       pages[i] = 0;
+                       coherent_pages = true;
+
+                       /*
+                        * Migration will fail if the page is pinned, so convert
+                        * the pin on the source page to a normal reference.
+                        */
+                       if (gup_flags & FOLL_PIN) {
+                               get_page(&folio->page);
+                               unpin_user_page(&folio->page);
+                       }
+
+                       ret = migrate_device_coherent_page(&folio->page);
+                       if (ret)
+                               goto unpin_pages;
+
                        continue;
+               }
 
+               if (folio_is_longterm_pinnable(folio))
+                       continue;
                /*
                 * Try to move out any movable page before pinning the range.
                 */
                                    folio_nr_pages(folio));
        }
 
-       if (!list_empty(&movable_page_list) || isolation_error_count)
+       if (!list_empty(&movable_page_list) || isolation_error_count
+               || coherent_pages)
                goto unpin_pages;
 
        /*
        return nr_pages;
 
 unpin_pages:
-       if (gup_flags & FOLL_PIN) {
-               unpin_user_pages(pages, nr_pages);
-       } else {
-               for (i = 0; i < nr_pages; i++)
+       /*
+        * pages[i] might be NULL if any device coherent pages were found.
+        */
+       for (i = 0; i < nr_pages; i++) {
+               if (!pages[i])
+                       continue;
+
+               if (gup_flags & FOLL_PIN)
+                       unpin_user_page(pages[i]);
+               else
                        put_page(pages[i]);
        }
 
 
                }
 
                if (!page) {
+                       /*
+                        * The only time there is no vma is when called from
+                        * migrate_device_coherent_page(). However this isn't
+                        * called if the page could not be unmapped.
+                        */
+                       VM_BUG_ON(!migrate->vma);
                        if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
                                continue;
                        if (!notified) {
        }
 }
 EXPORT_SYMBOL(migrate_vma_finalize);
+
+/*
+ * Migrate a device coherent page back to normal memory. The caller should have
+ * a reference on page which will be copied to the new page if migration is
+ * successful or dropped on failure.
+ *
+ * Returns 0 if the page was successfully migrated, or -EBUSY if the page
+ * could not be unmapped or the migration did not complete (for example
+ * because no destination page could be allocated).
+ */
+int migrate_device_coherent_page(struct page *page)
+{
+       unsigned long src_pfn, dst_pfn = 0;
+       struct migrate_vma args;
+       struct page *dpage;
+
+       /* Only order-0 pages are expected here. */
+       WARN_ON_ONCE(PageCompound(page));
+
+       lock_page(page);
+       src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
+       args.src = &src_pfn;
+       args.dst = &dst_pfn;
+       args.cpages = 1;
+       args.npages = 1;
+       args.vma = NULL;
+
+       /*
+        * We don't have a VMA and don't need to walk the page tables to find
+        * the source page. So call migrate_vma_unmap() directly to unmap the
+        * page as migrate_vma_setup() will fail if args.vma == NULL.
+        */
+       migrate_vma_unmap(&args);
+       if (!(src_pfn & MIGRATE_PFN_MIGRATE))
+               return -EBUSY;
+
+       /* Allocate the system-memory destination page; on failure dst_pfn
+        * stays 0 and the migration below will be abandoned. */
+       dpage = alloc_page(GFP_USER | __GFP_NOWARN);
+       if (dpage) {
+               lock_page(dpage);
+               dst_pfn = migrate_pfn(page_to_pfn(dpage));
+       }
+
+       /*
+        * NOTE(review): the dpage == NULL path relies on migrate_vma_pages()
+        * clearing MIGRATE_PFN_MIGRATE in src_pfn when there is no
+        * destination page, so copy_highpage() is never reached with a NULL
+        * dpage — confirm against migrate_vma_pages().
+        */
+       migrate_vma_pages(&args);
+       if (src_pfn & MIGRATE_PFN_MIGRATE)
+               copy_highpage(dpage, page);
+       /* Finalize drops the caller's reference on the source page (or the
+        * new page on success) and unlocks/frees as appropriate. */
+       migrate_vma_finalize(&args);
+
+       if (src_pfn & MIGRATE_PFN_MIGRATE)
+               return 0;
+       return -EBUSY;
+}