                                        struct vm_area_struct **vmas,
                                        unsigned int gup_flags)
 {
-       long i;
+       unsigned long i;
+       unsigned long step;
        bool drain_allow = true;
        bool migrate_allow = true;
        LIST_HEAD(cma_page_list);
 
 check_again:
-       for (i = 0; i < nr_pages; i++) {
+       for (i = 0; i < nr_pages;) {
+
+               struct page *head = compound_head(pages[i]);
+
+               /*
+                * gup may start from a tail page. Advance step by the
+                * remaining pages of this compound page.
+                */
+               step = (1 << compound_order(head)) - (pages[i] - head);
                /*
                 * If we get a page from the CMA zone, since we are going to
                 * be pinning these entries, we might as well move them out
                 * of the CMA zone if possible.
                 */
-               if (is_migrate_cma_page(pages[i])) {
-
-                       struct page *head = compound_head(pages[i]);
-
-                       if (PageHuge(head)) {
+               if (is_migrate_cma_page(head)) {
+                       if (PageHuge(head))
                                isolate_huge_page(head, &cma_page_list);
-                       } else {
+                       else {
                                if (!PageLRU(head) && drain_allow) {
                                        lru_add_drain_all();
                                        drain_allow = false;
                                }
                        }
                }
+
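+               /*
+                * Advance past the subpages of this compound page; they are
+                * covered by the check on the head above.
+                */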
+               i += step;
        }
 
        if (!list_empty(&cma_page_list)) {