release_extent_buffer(eb);
 }
 
+/*
+ * Clear the dirty state of a btree page and, if the page stayed clean,
+ * drop the PAGECACHE_TAG_DIRTY mark from the mapping's xarray.
+ *
+ * Caller must hold the page lock and the page must be dirty on entry
+ * (both are ASSERTed below).
+ */
+static void btree_clear_page_dirty(struct page *page)
+{
+       ASSERT(PageDirty(page));
+       ASSERT(PageLocked(page));
+       clear_page_dirty_for_io(page);
+       xa_lock_irq(&page->mapping->i_pages);
+       /*
+        * Re-check under the i_pages lock: only clear the xarray tag if
+        * the page has not been re-dirtied since clear_page_dirty_for_io().
+        */
+       if (!PageDirty(page))
+               __xa_clear_mark(&page->mapping->i_pages,
+                               page_index(page), PAGECACHE_TAG_DIRTY);
+       xa_unlock_irq(&page->mapping->i_pages);
+}
+
+/*
+ * Clear the dirty flag for a subpage extent buffer.
+ *
+ * In the subpage case (sectorsize < PAGE_SIZE) multiple extent buffers
+ * can share one page, so the page itself is only cleaned when the last
+ * dirty eb range within it goes clean.
+ */
+static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
+{
+       struct btrfs_fs_info *fs_info = eb->fs_info;
+       struct page *page = eb->pages[0];
+       bool last;
+
+       /* btree_clear_page_dirty() needs page locked */
+       lock_page(page);
+       /* 'last' is true when no other range in this page is still dirty */
+       last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
+                                                 eb->len);
+       if (last)
+               btree_clear_page_dirty(page);
+       unlock_page(page);
+       WARN_ON(atomic_read(&eb->refs) == 0);
+}
+
 void clear_extent_buffer_dirty(const struct extent_buffer *eb)
 {
        int i;
        int num_pages;
        struct page *page;
 
+       if (eb->fs_info->sectorsize < PAGE_SIZE)
+               return clear_subpage_extent_buffer_dirty(eb);
+
        num_pages = num_extent_pages(eb);
 
        for (i = 0; i < num_pages; i++) {
                page = eb->pages[i];
                if (!PageDirty(page))
                        continue;
-
                lock_page(page);
-               WARN_ON(!PagePrivate(page));
-
-               clear_page_dirty_for_io(page);
-               xa_lock_irq(&page->mapping->i_pages);
-               if (!PageDirty(page))
-                       __xa_clear_mark(&page->mapping->i_pages,
-                                       page_index(page), PAGECACHE_TAG_DIRTY);
-               xa_unlock_irq(&page->mapping->i_pages);
+               btree_clear_page_dirty(page);
                ClearPageError(page);
                unlock_page(page);
        }
        WARN_ON(atomic_read(&eb->refs) == 0);
        WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
 
-       if (!was_dirty)
-               for (i = 0; i < num_pages; i++)
-                       set_page_dirty(eb->pages[i]);
+       if (!was_dirty) {
+               bool subpage = eb->fs_info->sectorsize < PAGE_SIZE;
 
+               /*
+                * For subpage case, we can have other extent buffers in the
+                * same page, and in clear_subpage_extent_buffer_dirty() we
+                * have to clear page dirty without subpage lock held.
+                * This can cause race where our page gets dirty cleared after
+                * we just set it.
+                *
+                * Thankfully, clear_subpage_extent_buffer_dirty() has locked
+                * its page for other reasons, we can use page lock to prevent
+                * the above race.
+                */
+               if (subpage)
+                       lock_page(eb->pages[0]);
+               for (i = 0; i < num_pages; i++)
+                       btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
+                                            eb->start, eb->len);
+               if (subpage)
+                       unlock_page(eb->pages[0]);
+       }
 #ifdef CONFIG_BTRFS_DEBUG
        for (i = 0; i < num_pages; i++)
                ASSERT(PageDirty(eb->pages[i]));