  *
  * Return: The number of entries which were found.
  */
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
 {
-       XA_STATE(xas, &mapping->i_pages, start);
+       XA_STATE(xas, &mapping->i_pages, *start);
        struct folio *folio;
 
        rcu_read_lock();
        }
        rcu_read_unlock();
 
+       if (folio_batch_count(fbatch)) {
+               unsigned long nr = 1;
+               int idx = folio_batch_count(fbatch) - 1;
+
+               folio = fbatch->folios[idx];
+               if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
+                       nr = folio_nr_pages(folio);
+               *start = indices[idx] + nr;
+       }
        return folio_batch_count(fbatch);
 }
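
With this change find_get_entries() advances *start past the last entry it returned (hugetlb folios advance it by only one, since at this point hugetlb pages are indexed in the page cache in units of the huge page size), so callers no longer recompute the next offset from indices[] or folio->index between batches. The sketch below is illustrative only and not part of the patch: example_scan() is a hypothetical caller modelled on the loops converted further down, and it assumes the same PAGEVEC_SIZE-sized indices array the existing callers use.

static void example_scan(struct address_space *mapping, pgoff_t index,
		pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned int i;

	folio_batch_init(&fbatch);
	/* find_get_entries() now bumps 'index' past each returned batch */
	while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* indices[i] is still this entry's offset */
			if (xa_is_value(folio))
				continue;
			/* ... operate on the folio ... */
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
		/* no manual 'index++' or re-read of folio->index needed */
	}
}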
 
 
 
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 
        while (index < end) {
                cond_resched();
 
-               if (!find_get_entries(mapping, index, end - 1, &fbatch,
+               if (!find_get_entries(mapping, &index, end - 1, &fbatch,
                                indices)) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        folio = fbatch.folios[i];
 
-                       index = indices[i];
                        if (xa_is_value(folio)) {
                                if (unfalloc)
                                        continue;
-                               if (shmem_free_swap(mapping, index, folio)) {
+                               if (shmem_free_swap(mapping, indices[i], folio)) {
                                        /* Swap was replaced by page: retry */
-                                       index--;
+                                       index = indices[i];
                                        break;
                                }
                                nr_swaps_freed++;
                                if (folio_mapping(folio) != mapping) {
                                        /* Page was replaced by swap: retry */
                                        folio_unlock(folio);
-                                       index--;
+                                       index = indices[i];
                                        break;
                                }
                                VM_BUG_ON_FOLIO(folio_test_writeback(folio),
                                                folio);
                                truncate_inode_folio(mapping, folio);
                        }
-                       index = folio->index + folio_nr_pages(folio) - 1;
                        folio_unlock(folio);
                }
                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
-               index++;
        }
 
        spin_lock_irq(&info->lock);
 
        index = start;
        while (index < end) {
                cond_resched();
-               if (!find_get_entries(mapping, index, end - 1, &fbatch,
+               if (!find_get_entries(mapping, &index, end - 1, &fbatch,
                                indices)) {
                        /* If all gone from start onwards, we're done */
                        if (index == start)
                        struct folio *folio = fbatch.folios[i];
 
                        /* We rely upon deletion not changing page->index */
-                       index = indices[i];
 
                        if (xa_is_value(folio))
                                continue;
 
                        folio_lock(folio);
-                       VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+                       VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
                        folio_wait_writeback(folio);
                        truncate_inode_folio(mapping, folio);
                        folio_unlock(folio);
-                       index = folio_index(folio) + folio_nr_pages(folio) - 1;
                }
                truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
                folio_batch_release(&fbatch);
-               index++;
        }
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
 
        folio_batch_init(&fbatch);
        index = start;
-       while (find_get_entries(mapping, index, end, &fbatch, indices)) {
+       while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
 
                        /* We rely upon deletion not changing folio->index */
-                       index = indices[i];
 
                        if (xa_is_value(folio)) {
                                if (!invalidate_exceptional_entry2(mapping,
-                                               index, folio))
+                                               indices[i], folio))
                                        ret = -EBUSY;
                                continue;
                        }
                                 * If folio is mapped, before taking its lock,
                                 * zap the rest of the file in one hit.
                                 */
-                               unmap_mapping_pages(mapping, index,
-                                               (1 + end - index), false);
+                               unmap_mapping_pages(mapping, indices[i],
+                                               (1 + end - indices[i]), false);
                                did_range_unmap = 1;
                        }
 
                        folio_lock(folio);
-                       VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+                       VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
                        if (folio->mapping != mapping) {
                                folio_unlock(folio);
                                continue;
                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
                cond_resched();
-               index++;
        }
        /*
         * For DAX we invalidate page tables after invalidating page cache.  We