                btrfs_page_set_error(fs_info, page, start, len);
        }
 
-       if (fs_info->sectorsize == PAGE_SIZE)
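+       /*
+        * For the full-page case simply unlock the page; a subpage page may
+        * still have readers on other sectors, so only end the reader for
+        * this range and let the last reader unlock the page.
+        */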
+       if (!btrfs_is_subpage(fs_info, page))
                unlock_page(page);
        else
                btrfs_subpage_end_reader(fs_info, page, start, len);
 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
 {
        ASSERT(PageLocked(page));
-       if (fs_info->sectorsize == PAGE_SIZE)
+       if (!btrfs_is_subpage(fs_info, page))
                return;
 
        ASSERT(PagePrivate(page));
         * For regular sectorsize, we can use page->private to grab extent
         * buffer
         */
-       if (fs_info->sectorsize == PAGE_SIZE) {
+       if (fs_info->nodesize >= PAGE_SIZE) {
                ASSERT(PagePrivate(page) && page->private);
                return (struct extent_buffer *)page->private;
        }
        if (page->mapping)
                lockdep_assert_held(&page->mapping->private_lock);
 
-       if (fs_info->sectorsize == PAGE_SIZE) {
+       if (fs_info->nodesize >= PAGE_SIZE) {
                if (!PagePrivate(page))
                        attach_page_private(page, eb);
                else
 
        fs_info = btrfs_sb(page->mapping->host->i_sb);
 
-       if (fs_info->sectorsize < PAGE_SIZE)
+       if (btrfs_is_subpage(fs_info, page))
                return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
 
        attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
                return;
 
        fs_info = btrfs_sb(page->mapping->host->i_sb);
-       if (fs_info->sectorsize < PAGE_SIZE)
+       if (btrfs_is_subpage(fs_info, page))
                return btrfs_detach_subpage(fs_info, page);
 
        detach_page_private(page);
         * For regular sector size == page size case, since one page only
         * contains one sector, we return the page offset directly.
         */
-       if (fs_info->sectorsize == PAGE_SIZE) {
+       if (!btrfs_is_subpage(fs_info, page)) {
                *start = page_offset(page);
                *end = page_offset(page) + PAGE_SIZE;
                return;
         * Subpage metadata doesn't use page locking at all, so we can skip
         * the page locking.
         */
-       if (!ret || fs_info->sectorsize < PAGE_SIZE)
+       if (!ret || fs_info->nodesize < PAGE_SIZE)
                return ret;
 
        num_pages = num_extent_pages(eb);
        struct bvec_iter_all iter_all;
 
        fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
-       ASSERT(fs_info->sectorsize < PAGE_SIZE);
+       ASSERT(fs_info->nodesize < PAGE_SIZE);
 
        ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, iter_all) {
        if (!PagePrivate(page))
                return 0;
 
-       if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
+       if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
                return submit_eb_subpage(page, wbc, epd);
 
        spin_lock(&mapping->private_lock);
                return;
        }
 
-       if (fs_info->sectorsize == PAGE_SIZE) {
+       if (fs_info->nodesize >= PAGE_SIZE) {
                /*
                 * We do this since we'll remove the pages after we've
                 * removed the eb from the radix tree, so we could race
         * don't try to insert two ebs for the same bytenr.  So here we always
         * return NULL and just continue.
         */
-       if (fs_info->sectorsize < PAGE_SIZE)
+       if (fs_info->nodesize < PAGE_SIZE)
                return NULL;
 
        /* Page not yet attached to an extent buffer */
        return NULL;
 }
 
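+/*
+ * Validate the bytenr of a tree block: it must be sectorsize aligned, a
+ * subpage tree block (nodesize < PAGE_SIZE) must not cross a page boundary,
+ * and a regular tree block (nodesize >= PAGE_SIZE) must start at a page
+ * boundary.
+ *
+ * E.g. with 64K pages, 4K sectorsize and 16K nodesize, start == 60K is
+ * sectorsize aligned but would cross into the next page, so it is rejected.
+ */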
+static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
+{
+       if (!IS_ALIGNED(start, fs_info->sectorsize)) {
+               btrfs_err(fs_info, "bad tree block start %llu", start);
+               return -EINVAL;
+       }
+
+       if (fs_info->nodesize < PAGE_SIZE &&
+           offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
+               btrfs_err(fs_info,
+               "tree block crosses page boundary, start %llu nodesize %u",
+                         start, fs_info->nodesize);
+               return -EINVAL;
+       }
+       if (fs_info->nodesize >= PAGE_SIZE &&
+           !IS_ALIGNED(start, PAGE_SIZE)) {
+               btrfs_err(fs_info,
+               "tree block is not page aligned, start %llu nodesize %u",
+                         start, fs_info->nodesize);
+               return -EINVAL;
+       }
+       return 0;
+}
+
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                                          u64 start, u64 owner_root, int level)
 {
        int uptodate = 1;
        int ret;
 
-       if (!IS_ALIGNED(start, fs_info->sectorsize)) {
-               btrfs_err(fs_info, "bad tree block start %llu", start);
+       if (check_eb_alignment(fs_info, start))
                return ERR_PTR(-EINVAL);
-       }
 
 #if BITS_PER_LONG == 32
        if (start >= MAX_LFS_FILESIZE) {
                btrfs_warn_32bit_limit(fs_info);
 #endif
 
-       if (fs_info->sectorsize < PAGE_SIZE &&
-           offset_in_page(start) + len > PAGE_SIZE) {
-               btrfs_err(fs_info,
-               "tree block crosses page boundary, start %llu nodesize %lu",
-                         start, len);
-               return ERR_PTR(-EINVAL);
-       }
-
        eb = find_extent_buffer(fs_info, start);
        if (eb)
                return eb;
                 * page, but it may change in the future for 16K page size
                 * support, so we still preallocate the memory in the loop.
                 */
-               if (fs_info->sectorsize < PAGE_SIZE) {
+               if (fs_info->nodesize < PAGE_SIZE) {
                        prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
                        if (IS_ERR(prealloc)) {
                                ret = PTR_ERR(prealloc);
        int num_pages;
        struct page *page;
 
-       if (eb->fs_info->sectorsize < PAGE_SIZE)
+       if (eb->fs_info->nodesize < PAGE_SIZE)
                return clear_subpage_extent_buffer_dirty(eb);
 
        num_pages = num_extent_pages(eb);
        WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
 
        if (!was_dirty) {
-               bool subpage = eb->fs_info->sectorsize < PAGE_SIZE;
+               bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
 
                /*
                 * For subpage case, we can have other extent buffers in the
        num_pages = num_extent_pages(eb);
        for (i = 0; i < num_pages; i++) {
                page = eb->pages[i];
-               if (page)
-                       btrfs_page_clear_uptodate(fs_info, page,
-                                                 eb->start, eb->len);
+               if (!page)
+                       continue;
+
+               /*
+                * This is special handling for metadata subpage, as the
+                * regular btrfs_is_subpage() cannot handle cloned/dummy
+                * metadata: those pages have no mapping, so they would be
+                * treated as subpage even with a regular nodesize.
+                */
+               if (fs_info->nodesize >= PAGE_SIZE)
+                       ClearPageUptodate(page);
+               else
+                       btrfs_subpage_clear_uptodate(fs_info, page, eb->start,
+                                                    eb->len);
        }
 }
 
        num_pages = num_extent_pages(eb);
        for (i = 0; i < num_pages; i++) {
                page = eb->pages[i];
-               btrfs_page_set_uptodate(fs_info, page, eb->start, eb->len);
+
+               /*
+                * This is special handling for metadata subpage, as the
+                * regular btrfs_is_subpage() cannot handle cloned/dummy
+                * metadata: those pages have no mapping, so they would be
+                * treated as subpage even with a regular nodesize.
+                */
+               if (fs_info->nodesize >= PAGE_SIZE)
+                       SetPageUptodate(page);
+               else
+                       btrfs_subpage_set_uptodate(fs_info, page, eb->start,
+                                                  eb->len);
        }
 }
 
        if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
                return -EIO;
 
-       if (eb->fs_info->sectorsize < PAGE_SIZE)
+       if (eb->fs_info->nodesize < PAGE_SIZE)
                return read_extent_buffer_subpage(eb, wait, mirror_num);
 
        num_pages = num_extent_pages(eb);
         * would have !PageUptodate && !PageError, as we clear PageError before
         * reading.
         */
-       if (fs_info->sectorsize < PAGE_SIZE) {
+       if (fs_info->nodesize < PAGE_SIZE) {
                bool uptodate, error;
 
                uptodate = btrfs_subpage_test_uptodate(fs_info, page,
 
        ASSERT(dst->len == src->len);
 
-       if (dst->fs_info->sectorsize == PAGE_SIZE) {
+       if (dst->fs_info->nodesize >= PAGE_SIZE) {
                num_pages = num_extent_pages(dst);
                for (i = 0; i < num_pages; i++)
                        copy_page(page_address(dst->pages[i]),
                size_t src_offset = get_eb_offset_in_page(src, 0);
                size_t dst_offset = get_eb_offset_in_page(dst, 0);
 
-               ASSERT(src->fs_info->sectorsize < PAGE_SIZE);
+               ASSERT(src->fs_info->nodesize < PAGE_SIZE);
                memcpy(page_address(dst->pages[0]) + dst_offset,
                       page_address(src->pages[0]) + src_offset,
                       src->len);
 {
        struct extent_buffer *eb;
 
-       if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
+       if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
                return try_release_subpage_extent_buffer(page);
 
        /*
 
  *   This means a slightly higher tree locking latency.
  */
 
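+/*
+ * Whether a page goes through the subpage routine:
+ *
+ *   data (or unmapped) pages: subpage iff sectorsize < PAGE_SIZE
+ *   metadata pages:           subpage iff nodesize  < PAGE_SIZE
+ *
+ * E.g. 4K sectorsize with 64K nodesize on a 64K page machine: data pages
+ * take the subpage routine while metadata pages do not.
+ */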
+bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
+{
+       if (fs_info->sectorsize >= PAGE_SIZE)
+               return false;
+
+       /*
+        * Only data pages (either through DIO or compression) can have no
+        * mapping. And if page->mapping->host is a data inode, it's subpage,
+        * as we have already ruled out the sectorsize >= PAGE_SIZE case.
+        */
+       if (!page->mapping || !page->mapping->host ||
+           is_data_inode(page->mapping->host))
+               return true;
+
+       /*
+        * Now the only remaining case is metadata, which goes through the
+        * subpage routine only if nodesize < PAGE_SIZE.
+        */
+       if (fs_info->nodesize < PAGE_SIZE)
+               return true;
+       return false;
+}
+
 void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
 {
        unsigned int cur = 0;
                ASSERT(PageLocked(page));
 
        /* Either not subpage, or the page already has private attached */
-       if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
+       if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))
                return 0;
 
        subpage = btrfs_alloc_subpage(fs_info, type);
        struct btrfs_subpage *subpage;
 
        /* Either not subpage, or already detached */
-       if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
+       if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))
                return;
 
        subpage = (struct btrfs_subpage *)detach_page_private(page);
 {
        struct btrfs_subpage *subpage;
 
-       if (fs_info->sectorsize == PAGE_SIZE)
+       if (!btrfs_is_subpage(fs_info, page))
                return;
 
        ASSERT(PagePrivate(page) && page->mapping);
 {
        struct btrfs_subpage *subpage;
 
-       if (fs_info->sectorsize == PAGE_SIZE)
+       if (!btrfs_is_subpage(fs_info, page))
                return;
 
        ASSERT(PagePrivate(page) && page->mapping);
 int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len)
 {
-       if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {
+       if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
                lock_page(page);
                return 0;
        }
 void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
                struct page *page, u64 start, u32 len)
 {
-       if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)
+       if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
                return unlock_page(page);
        btrfs_subpage_clamp_range(page, &start, &len);
        if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
 void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,                \
                struct page *page, u64 start, u32 len)                  \
 {                                                                      \
-       if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {   \
+       if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {   \
                set_page_func(page);                                    \
                return;                                                 \
        }                                                               \
 void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,      \
                struct page *page, u64 start, u32 len)                  \
 {                                                                      \
-       if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {   \
+       if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {   \
                clear_page_func(page);                                  \
                return;                                                 \
        }                                                               \
 bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,       \
                struct page *page, u64 start, u32 len)                  \
 {                                                                      \
-       if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)     \
+       if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))     \
                return test_page_func(page);                            \
        return btrfs_subpage_test_##name(fs_info, page, start, len);    \
 }                                                                      \
 void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,  \
                struct page *page, u64 start, u32 len)                  \
 {                                                                      \
-       if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {   \
+       if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {   \
                set_page_func(page);                                    \
                return;                                                 \
        }                                                               \
 void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
                struct page *page, u64 start, u32 len)                  \
 {                                                                      \
-       if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {   \
+       if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {   \
                clear_page_func(page);                                  \
                return;                                                 \
        }                                                               \
 bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
                struct page *page, u64 start, u32 len)                  \
 {                                                                      \
-       if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)     \
+       if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))     \
                return test_page_func(page);                            \
        btrfs_subpage_clamp_range(page, &start, &len);                  \
        return btrfs_subpage_test_##name(fs_info, page, start, len);    \
                return;
 
        ASSERT(!PageDirty(page));
-       if (fs_info->sectorsize == PAGE_SIZE)
+       if (!btrfs_is_subpage(fs_info, page))
                return;
 
        ASSERT(PagePrivate(page) && page->private);
        struct btrfs_subpage *subpage;
 
        ASSERT(PageLocked(page));
-       /* For regular page size case, we just unlock the page */
-       if (fs_info->sectorsize == PAGE_SIZE)
+       /* For the non-subpage case, we just unlock the page */
+       if (!btrfs_is_subpage(fs_info, page))
                return unlock_page(page);
 
        ASSERT(PagePrivate(page) && page->private);