struct scrub_sector {
        struct scrub_block      *sblock;
-       struct page             *page;
        struct btrfs_device     *dev;
        struct list_head        list;
        u64                     flags;  /* extent flags */
 
        sblock->sectors[sblock->sector_count] = ssector;
        sblock->sector_count++;
+       sblock->len += sblock->sctx->fs_info->sectorsize;
 
        return ssector;
 }
 
+/*
+ * Find the page which covers the sector @ssector inside its parent sblock.
+ *
+ * The page is located by the byte offset of the sector inside the sblock,
+ * as sblock->pages[] holds one page per PAGE_SIZE of the block range.
+ */
+static struct page *scrub_sector_get_page(struct scrub_sector *ssector)
+{
+       struct scrub_block *sblock = ssector->sblock;
+       int index;
+
+       /*
+        * When calling this function, ssector must be already attached to the
+        * parent sblock.
+        */
+       ASSERT(sblock);
+
+       /* The range should be inside the sblock range */
+       ASSERT(ssector->logical - sblock->logical < sblock->len);
+
+       index = (ssector->logical - sblock->logical) >> PAGE_SHIFT;
+       ASSERT(index < SCRUB_MAX_PAGES);
+       ASSERT(sblock->pages[index]);
+       /* The page must already be allocated and have the Private flag set. */
+       ASSERT(PagePrivate(sblock->pages[index]));
+       return sblock->pages[index];
+}
+
+/* Return the byte offset of the sector's data within its covering page. */
+static unsigned int scrub_sector_get_page_offset(struct scrub_sector *ssector)
+{
+       struct scrub_block *sblock = ssector->sblock;
+
+       /*
+        * When calling this function, ssector must be already attached to the
+        * parent sblock.
+        */
+       ASSERT(sblock);
+
+       /* The range should be inside the sblock range */
+       ASSERT(ssector->logical - sblock->logical < sblock->len);
+
+       return offset_in_page(ssector->logical - sblock->logical);
+}
+
+/* Return the kernel virtual address of the sector's data. */
+static char *scrub_sector_get_kaddr(struct scrub_sector *ssector)
+{
+       return page_address(scrub_sector_get_page(ssector)) +
+              scrub_sector_get_page_offset(ssector);
+}
+
+/*
+ * Add @len bytes of the sector's data to @bio.
+ *
+ * Thin wrapper around bio_add_page() resolving the sector's page and
+ * in-page offset; returns the number of bytes added, as bio_add_page() does.
+ */
+static int bio_add_scrub_sector(struct bio *bio, struct scrub_sector *ssector,
+                               unsigned int len)
+{
+       return bio_add_page(bio, scrub_sector_get_page(ssector), len,
+                           scrub_sector_get_page_offset(ssector));
+}
+
 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
                                     struct scrub_block *sblocks_for_recheck[]);
 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
        if (sctx->curr != -1) {
                struct scrub_bio *sbio = sctx->bios[sctx->curr];
 
-               for (i = 0; i < sbio->sector_count; i++) {
-                       WARN_ON(!sbio->sectors[i]->page);
+               for (i = 0; i < sbio->sector_count; i++)
                        scrub_block_put(sbio->sectors[i]->sblock);
-               }
                bio_put(sbio->bio);
        }
 
        for (i = 0; i < sblock->sector_count; i++) {
                struct scrub_sector *sector = sblock->sectors[i];
 
-               WARN_ON(!sector->page);
-               bio_add_page(bio, sector->page, PAGE_SIZE, 0);
+               bio_add_scrub_sector(bio, sector, fs_info->sectorsize);
        }
 
        if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
                        continue;
                }
 
-               WARN_ON(!sector->page);
                bio_init(&bio, sector->dev->bdev, &bvec, 1, REQ_OP_READ);
-               bio_add_page(&bio, sector->page, fs_info->sectorsize, 0);
+               bio_add_scrub_sector(&bio, sector, fs_info->sectorsize);
                bio.bi_iter.bi_sector = sector->physical >> 9;
 
                btrfsic_check_bio(&bio);
        struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
        const u32 sectorsize = fs_info->sectorsize;
 
-       BUG_ON(sector_bad->page == NULL);
-       BUG_ON(sector_good->page == NULL);
        if (force_write || sblock_bad->header_error ||
            sblock_bad->checksum_error || sector_bad->io_error) {
                struct bio bio;
 
                bio_init(&bio, sector_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE);
                bio.bi_iter.bi_sector = sector_bad->physical >> 9;
-               __bio_add_page(&bio, sector_good->page, sectorsize, 0);
+               ret = bio_add_scrub_sector(&bio, sector_good, sectorsize);
 
                btrfsic_check_bio(&bio);
                ret = submit_bio_wait(&bio);
 
 static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
 {
+       const u32 sectorsize = sblock->sctx->fs_info->sectorsize;
        struct scrub_sector *sector = sblock->sectors[sector_num];
 
-       BUG_ON(sector->page == NULL);
+       /*
+        * On read error wipe only this sector's bytes; the page may be shared
+        * by other sectors, so a full clear_page() would be wrong here.
+        */
        if (sector->io_error)
-               clear_page(page_address(sector->page));
+               memset(scrub_sector_get_kaddr(sector), 0, sectorsize);
 
        return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
 }
                goto again;
        }
 
-       ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
+       ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
        if (ret != sectorsize) {
                if (sbio->sector_count < 1) {
                        bio_put(sbio->bio);
        if (!sector->have_csum)
                return 0;
 
-       kaddr = page_address(sector->page);
+       kaddr = scrub_sector_get_kaddr(sector);
 
        shash->tfm = fs_info->csum_shash;
        crypto_shash_init(shash);
 
-       /*
-        * In scrub_sectors() and scrub_sectors_for_parity() we ensure each sector
-        * only contains one sector of data.
-        */
        crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
 
        if (memcmp(csum, sector->csum, fs_info->csum_size))
        ASSERT(sblock->sector_count == num_sectors);
 
        sector = sblock->sectors[0];
-       kaddr = page_address(sector->page);
+       kaddr = scrub_sector_get_kaddr(sector);
        h = (struct btrfs_header *)kaddr;
        memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);
 
                            sectorsize - BTRFS_CSUM_SIZE);
 
        for (i = 1; i < num_sectors; i++) {
-               kaddr = page_address(sblock->sectors[i]->page);
+               kaddr = scrub_sector_get_kaddr(sblock->sectors[i]);
                crypto_shash_update(shash, kaddr, sectorsize);
        }
 
 
        BUG_ON(sblock->sector_count < 1);
        sector = sblock->sectors[0];
-       kaddr = page_address(sector->page);
+       kaddr = scrub_sector_get_kaddr(sector);
        s = (struct btrfs_super_block *)kaddr;
 
        if (sector->logical != btrfs_super_bytenr(s))
 
 static void scrub_sector_put(struct scrub_sector *sector)
 {
-       if (atomic_dec_and_test(§or->refs)) {
-               if (sector->page)
-                       __free_page(sector->page);
+       if (atomic_dec_and_test(§or->refs))
                kfree(sector);
-       }
 }
 
 /*
        }
 
        sbio->sectors[sbio->sector_count] = sector;
-       ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
+       ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
        if (ret != sectorsize) {
                if (sbio->sector_count < 1) {
                        bio_put(sbio->bio);
        for (i = 0; i < sblock->sector_count; i++) {
                struct scrub_sector *sector = sblock->sectors[i];
 
-               /*
-                * For now, our scrub is still one page per sector, so pgoff
-                * is always 0.
-                */
-               raid56_add_scrub_pages(rbio, sector->page, 0, sector->logical);
+               raid56_add_scrub_pages(rbio, scrub_sector_get_page(sector),
+                                      scrub_sector_get_page_offset(sector),
+                                      sector->logical);
        }
 
        INIT_WORK(&sblock->work, scrub_missing_raid56_worker);