return ret;
 }
 
+/*
+ * Defrag all target ranges collected inside one cluster.
+ *
+ * @inode:             inode being defragged
+ * @ra:                readahead state; may be NULL to skip readahead
+ * @start:             start offset of the cluster
+ * @len:               length of the cluster in bytes
+ * @extent_thresh:     threshold passed through to defrag_collect_targets()
+ * @newer_than:        minimum transid, passed through to target collection
+ * @do_compress:       whether the defragged range will be compressed
+ * @sectors_defragged: in/out count of sectors defragged so far; updated here
+ * @max_sectors:       stop once this many sectors were defragged (0 = no limit)
+ *
+ * Return: 0 on success, <0 on error from target collection or range defrag.
+ */
+static int defrag_one_cluster(struct btrfs_inode *inode,
+                             struct file_ra_state *ra,
+                             u64 start, u32 len, u32 extent_thresh,
+                             u64 newer_than, bool do_compress,
+                             unsigned long *sectors_defragged,
+                             unsigned long max_sectors)
+{
+       const u32 sectorsize = inode->root->fs_info->sectorsize;
+       struct defrag_target_range *entry;
+       struct defrag_target_range *tmp;
+       LIST_HEAD(target_list);
+       int ret;
+
+       BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
+       ret = defrag_collect_targets(inode, start, len, extent_thresh,
+                                    newer_than, do_compress, false,
+                                    &target_list);
+       if (ret < 0)
+               goto out;
+
+       list_for_each_entry(entry, &target_list, list) {
+               u32 range_len = entry->len;
+
+               /* Reached the limit */
+               if (max_sectors && max_sectors == *sectors_defragged)
+                       break;
+
+               /* Cap the byte range so we never exceed @max_sectors */
+               if (max_sectors)
+                       range_len = min_t(u32, range_len,
+                               (max_sectors - *sectors_defragged) * sectorsize);
+
+               if (ra)
+                       page_cache_sync_readahead(inode->vfs_inode.i_mapping,
+                               ra, NULL, entry->start >> PAGE_SHIFT,
+                               ((entry->start + range_len - 1) >> PAGE_SHIFT) -
+                               (entry->start >> PAGE_SHIFT) + 1);
+               /*
+                * Here we may not defrag any range if holes are punched before
+                * we locked the pages.
+                * But that's fine, it only affects the @sectors_defragged
+                * accounting.
+                */
+               ret = defrag_one_range(inode, entry->start, range_len,
+                                      extent_thresh, newer_than, do_compress);
+               if (ret < 0)
+                       break;
+               /*
+                * @sectors_defragged counts sectors while @range_len is in
+                * bytes; convert before accumulating, otherwise the limit
+                * check above compares sectors against bytes.
+                */
+               *sectors_defragged += range_len >>
+                                     inode->root->fs_info->sectorsize_bits;
+       }
+out:
+       list_for_each_entry_safe(entry, tmp, &target_list, list) {
+               list_del_init(&entry->list);
+               kfree(entry);
+       }
+       return ret;
+}
+
 /*
  * Entry point to file defragmentation.
  *