* checked so the end_io handlers know about it
                 */
                ASSERT(!bio_flagged(bio, BIO_CLONED));
-               bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
-                       SetPageChecked(bvec->bv_page);
+               bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) {
+                       u64 bvec_start = page_offset(bvec->bv_page) +
+                                        bvec->bv_offset;
+
+                       btrfs_page_set_checked(btrfs_sb(cb->inode->i_sb),
+                                       bvec->bv_page, bvec_start,
+                                       bvec->bv_len);
+               }
 
                bio_endio(cb->orig_bio);
        }
 
 /*
  * unlocks pages after btrfs_file_write is done with them
  */
-static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
+                            struct page **pages, size_t num_pages,
+                            u64 pos, u64 copied)
 {
        size_t i;
+       u64 block_start = round_down(pos, fs_info->sectorsize);
+       u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
+
+       ASSERT(block_len <= U32_MAX);
        for (i = 0; i < num_pages; i++) {
                /* page checked is some magic around finding pages that
                 * have been modified without going through btrfs_set_page_dirty
                 * accessed as prepare_pages should have marked them accessed
                 * in prepare_pages via find_or_create_page()
                 */
-               ClearPageChecked(pages[i]);
+               btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
+                                              block_len);
                unlock_page(pages[i]);
                put_page(pages[i]);
        }
                struct page *p = pages[i];
 
                btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
-               ClearPageChecked(p);
+               btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
                btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
        }
 
 
                btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
                if (ret) {
-                       btrfs_drop_pages(pages, num_pages);
+                       btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
                        break;
                }
 
                if (only_release_metadata)
                        btrfs_check_nocow_unlock(BTRFS_I(inode));
 
-               btrfs_drop_pages(pages, num_pages);
+               btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
 
                cond_resched();
 
 
 #include "delalloc-space.h"
 #include "block-group.h"
 #include "discard.h"
+#include "subpage.h"
 
 #define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
 #define MAX_CACHE_BYTES_PER_GIG        SZ_64K
 
        for (i = 0; i < io_ctl->num_pages; i++) {
                if (io_ctl->pages[i]) {
-                       ClearPageChecked(io_ctl->pages[i]);
+                       btrfs_page_clear_checked(io_ctl->fs_info,
+                                       io_ctl->pages[i],
+                                       page_offset(io_ctl->pages[i]),
+                                       PAGE_SIZE);
                        unlock_page(io_ctl->pages[i]);
                        put_page(io_ctl->pages[i]);
                }
 
                clear_page_dirty_for_io(page);
                SetPageError(page);
        }
-       ClearPageChecked(page);
+       btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
        unlock_page(page);
        put_page(page);
        kfree(fixup);
         * page->mapping outside of the page lock.
         */
        ihold(inode);
-       SetPageChecked(page);
+       btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
        get_page(page);
        btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
        fixup->page = page;
                                    u64 start, u64 end)
 {
        struct inode *inode = page->mapping->host;
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        const u32 sectorsize = root->fs_info->sectorsize;
        u32 pg_off;
        unsigned int result = 0;
 
-       if (PageChecked(page)) {
-               ClearPageChecked(page);
+       if (btrfs_page_test_checked(fs_info, page, start, end + 1 - start)) {
+               btrfs_page_clear_checked(fs_info, page, start, end + 1 - start);
                return 0;
        }
 
        /*
-        * For subpage case, above PageChecked is not safe as it's not subpage
-        * compatible.
-        * But for now only cow fixup and compressed read utilize PageChecked
-        * flag, while in this context we can easily use bbio->csum to
-        * determine if we really need to do csum verification.
-        *
-        * So for now, just exit if bbio->csum is NULL, as it means it's
-        * compressed read, and its compressed data csum has already been
-        * verified.
+        * This only happens for NODATASUM or compressed read.
+        * Normally this should be covered by above check for compressed read
+        * or the next check for NODATASUM.  Just do a quicker exit here.
         */
        if (bbio->csum == NULL)
                return 0;
                                     len);
                flush_dcache_page(page);
        }
-       ClearPageChecked(page);
+       btrfs_page_clear_checked(fs_info, page, block_start,
+                                block_end + 1 - block_start);
        btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
        unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
 
         * did something wrong.
         */
        ASSERT(!PageOrdered(page));
+       btrfs_page_clear_checked(fs_info, page, page_offset(page), PAGE_SIZE);
        if (!inode_evicting)
                __btrfs_releasepage(page, GFP_NOFS);
-       ClearPageChecked(page);
        clear_page_extent_mapped(page);
 }
 
                memzero_page(page, zero_start, PAGE_SIZE - zero_start);
                flush_dcache_page(page);
        }
-       ClearPageChecked(page);
+       btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
        btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
        btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
 
 
        }
 
        btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
-       ClearPageChecked(page);
+       btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
        btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
 out_unlock:
        if (page) {
 
        subpage_info->ordered_offset = cur;
        cur += nr_bits;
 
+       subpage_info->checked_offset = cur;
+       cur += nr_bits;
+
        subpage_info->total_nr_bits = cur;
 }
 
        u32 orig_len = *len;
 
        *start = max_t(u64, page_offset(page), orig_start);
-       *len = min_t(u64, page_offset(page) + PAGE_SIZE,
-                    orig_start + orig_len) - *start;
+       /*
+        * For certain call sites like btrfs_drop_pages(), we may have pages
+        * beyond the target range. In that case, just set @len to 0, subpage
+        * helpers can handle @len == 0 without any problem.
+        */
+       if (page_offset(page) >= orig_start + orig_len)
+               *len = 0;
+       else
+               *len = min_t(u64, page_offset(page) + PAGE_SIZE,
+                            orig_start + orig_len) - *start;
 }
 
 void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
                ClearPageOrdered(page);
        spin_unlock_irqrestore(&subpage->lock, flags);
 }
+
+/*
+ * Mark the range [@start, @start + @len) of @page as checked in the
+ * subpage checked bitmap.
+ *
+ * @start/@len are in bytes and are converted to sector-granularity bits
+ * via fs_info->sectorsize_bits, matching the other subpage bitmap helpers.
+ * Once every sector of the page is checked, the page-level PG_checked
+ * flag is set as well so full-page tests stay cheap.
+ */
+void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
+                              struct page *page, u64 start, u32 len)
+{
+       struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+       unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+                                                       checked, start, len);
+       unsigned long flags;
+
+       spin_lock_irqsave(&subpage->lock, flags);
+       bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+       /* All sectors of the page are now checked, promote to the page flag */
+       if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
+               SetPageChecked(page);
+       spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
+/*
+ * Clear the checked status of the range [@start, @start + @len) of @page
+ * in the subpage checked bitmap.
+ *
+ * The page-level PG_checked flag is cleared unconditionally: once any
+ * sector is no longer checked, the page as a whole can no longer be
+ * considered fully checked.
+ */
+void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
+                                struct page *page, u64 start, u32 len)
+{
+       struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+       unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+                                                       checked, start, len);
+       unsigned long flags;
+
+       spin_lock_irqsave(&subpage->lock, flags);
+       bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+       ClearPageChecked(page);
+       spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
 /*
  * Unlike set/clear which is dependent on each page status, for test all bits
  * are tested in the same way.
 IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
 IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
 IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
+IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
 
 /*
  * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
                         PageWriteback);
 IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
                         PageOrdered);
+IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);
 
 /*
  * Make sure not only the page dirty bit is cleared, but also subpage dirty bit
 
        unsigned int dirty_offset;
        unsigned int writeback_offset;
        unsigned int ordered_offset;
+       unsigned int checked_offset;
 };
 
 /*
 DECLARE_BTRFS_SUBPAGE_OPS(dirty);
 DECLARE_BTRFS_SUBPAGE_OPS(writeback);
 DECLARE_BTRFS_SUBPAGE_OPS(ordered);
+DECLARE_BTRFS_SUBPAGE_OPS(checked);
 
 bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len);