* This takes the tree lock, and returns 0 on success and < 0 on error.
  */
 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                      u32 bits, int delete,
-                      struct extent_state **cached_state,
+                      u32 bits, struct extent_state **cached_state,
                       gfp_t mask, struct extent_changeset *changeset)
 {
        struct extent_state *state;
        u64 last_end;
        int err;
        int clear = 0;
-       int wake = (bits & EXTENT_LOCKED) ? 1 : 0;
+       int wake;
+       int delete = (bits & EXTENT_CLEAR_ALL_BITS);
 
        btrfs_debug_check_extent_io_range(tree, start, end);
        trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
 
-       if (bits & EXTENT_DELALLOC)
-               bits |= EXTENT_NORESERVE;
-
        if (delete)
                bits |= ~EXTENT_CTLBITS;
 
+       if (bits & EXTENT_DELALLOC)
+               bits |= EXTENT_NORESERVE;
+
+       wake = (bits & EXTENT_LOCKED) ? 1 : 0;
        if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
                clear = 1;
 again:
         */
        ASSERT(!(bits & EXTENT_LOCKED));
 
-       return __clear_extent_bit(tree, start, end, bits, 0, NULL, GFP_NOFS,
+       return __clear_extent_bit(tree, start, end, bits, NULL, GFP_NOFS,
                                  changeset);
 }
 
        if (err == -EEXIST) {
                if (failed_start > start)
                        clear_extent_bit(tree, start, failed_start - 1,
-                                        EXTENT_LOCKED, 0, NULL);
+                                        EXTENT_LOCKED, NULL);
                return 0;
        }
        return 1;
 
  * delalloc bytes decremented, in an atomic way to prevent races with stat(2).
  */
 #define EXTENT_ADD_INODE_BYTES  (1U << 15)
+
+/*
+ * Set during truncate when we're clearing an entire range and we just want the
+ * extent states to go away.
+ */
+#define EXTENT_CLEAR_ALL_BITS  (1U << 16)
+
 #define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
                                 EXTENT_CLEAR_DATA_RESV)
 #define EXTENT_CTLBITS         (EXTENT_DO_ACCOUNTING | \
-                                EXTENT_ADD_INODE_BYTES)
+                                EXTENT_ADD_INODE_BYTES | \
+                                EXTENT_CLEAR_ALL_BITS)
 
 /*
  * Redefined bits above which are used only in the device allocation tree,
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                             u32 bits, struct extent_changeset *changeset);
 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                      u32 bits, int delete, struct extent_state **cached,
-                      gfp_t mask, struct extent_changeset *changeset);
+                      u32 bits, struct extent_state **cached, gfp_t mask,
+                      struct extent_changeset *changeset);
 
 static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
-                                  u64 end, u32 bits, int delete,
+                                  u64 end, u32 bits,
                                   struct extent_state **cached)
 {
-       return __clear_extent_bit(tree, start, end, bits, delete, cached,
+       return __clear_extent_bit(tree, start, end, bits, cached,
                                  GFP_NOFS, NULL);
 }
 
 static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                                struct extent_state **cached)
 {
-       return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 0, cached,
+       return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached,
                                  GFP_NOFS, NULL);
 }
 
 static inline int unlock_extent_atomic(struct extent_io_tree *tree, u64 start,
                                       u64 end, struct extent_state **cached)
 {
-       return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 0, cached,
+       return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached,
                                  GFP_ATOMIC, NULL);
 }
 
 static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
                                    u64 end, u32 bits)
 {
-       return clear_extent_bit(tree, start, end, bits, 0, NULL);
+       return clear_extent_bit(tree, start, end, bits, NULL);
 }
 
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                u64 end, struct extent_state **cached_state)
 {
-       return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+       return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
                                  cached_state, GFP_NOFS, NULL);
 }
 
 {
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC |
-                               EXTENT_DO_ACCOUNTING, 0, cached);
+                               EXTENT_DO_ACCOUNTING, cached);
 }
 
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 
                                  struct page *locked_page,
                                  u32 clear_bits, unsigned long page_ops)
 {
-       clear_extent_bit(&inode->io_tree, start, end, clear_bits, 0, NULL);
+       clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
 
        __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
                               start, end, page_ops, NULL);
                 * The delalloc new bit will be cleared by ordered extent
                 * completion.
                 */
-               ret = __clear_extent_bit(tree, start, end, clear_bits, 0, NULL,
+               ret = __clear_extent_bit(tree, start, end, clear_bits, NULL,
                                         mask, NULL);
 
                /* if clear_extent_bit failed for enomem reasons,
 
 
                __clear_extent_bit(&device->alloc_state, stripe->physical,
                                   stripe->physical + stripe_size - 1, bits,
-                                  0, NULL, GFP_NOWAIT, NULL);
+                                  NULL, GFP_NOWAIT, NULL);
        }
 }
 
 
        if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
                return 0;
        return clear_extent_bit(&inode->file_extent_tree, start,
-                               start + len - 1, EXTENT_DIRTY, 0, NULL);
+                               start + len - 1, EXTENT_DIRTY, NULL);
 }
 
 static inline u32 max_ordered_sum_bytes(struct btrfs_fs_info *fs_info,
 
         */
        clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
                         EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
-                        0, cached);
+                        cached);
 
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        extra_bits, cached);
 
        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0) {
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
-                                EXTENT_DELALLOC, 0, NULL);
+                                EXTENT_DELALLOC, NULL);
                goto fail;
        }
        leaf = path->nodes[0];
                if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
                    found_key.offset != offset) {
                        clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
-                                        inode->i_size - 1, EXTENT_DELALLOC, 0,
+                                        inode->i_size - 1, EXTENT_DELALLOC,
                                         NULL);
                        btrfs_release_path(path);
                        goto fail;
        ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
        if (ret)
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
-                                EXTENT_DELALLOC, 0, NULL);
+                                EXTENT_DELALLOC, NULL);
 
        return ret;
 }
 
 
                if (count > 0)
                        clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
-                                        0, NULL);
+                                        NULL);
        }
 
        return cow_file_range(inode, locked_page, start, end, page_started,
            !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
                clear_extent_bit(&inode->io_tree, start, end,
                                 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
-                                0, &cached_state);
+                                &cached_state);
 
        btrfs_inode_safe_disk_i_size_write(inode, 0);
        ret = btrfs_update_inode_fallback(trans, root, inode);
        }
        ret = 0;
 out:
-       clear_extent_bit(&inode->io_tree, start, end, clear_bits, 0,
+       clear_extent_bit(&inode->io_tree, start, end, clear_bits,
                         &cached_state);
 
        if (trans)
 
        clear_extent_bit(&inode->io_tree, block_start, block_end,
                         EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
-                        0, &cached_state);
+                        &cached_state);
 
        ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
                                        &cached_state);
                                               end - start + 1);
 
                clear_extent_bit(io_tree, start, end,
-                                EXTENT_LOCKED | EXTENT_DELALLOC |
-                                EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
+                                EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
                                 &cached_state);
 
                cond_resched();
        cur = page_start;
        while (cur < page_end) {
                struct btrfs_ordered_extent *ordered;
-               bool delete_states;
                u64 range_end;
                u32 range_len;
+               u32 extra_flags = 0;
 
                ordered = btrfs_lookup_first_ordered_range(inode, cur,
                                                           page_end + 1 - cur);
                         * No ordered extent covering this range, we are safe
                         * to delete all extent states in the range.
                         */
-                       delete_states = true;
+                       extra_flags = EXTENT_CLEAR_ALL_BITS;
                        goto next;
                }
                if (ordered->file_offset > cur) {
                         * the ordered extent in the next iteration.
                         */
                        range_end = ordered->file_offset - 1;
-                       delete_states = true;
+                       extra_flags = EXTENT_CLEAR_ALL_BITS;
                        goto next;
                }
 
                         * We can't delete the extent states as
                         * btrfs_finish_ordered_io() may still use some of them.
                         */
-                       delete_states = false;
                        goto next;
                }
                btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);
                        clear_extent_bit(tree, cur, range_end,
                                         EXTENT_DELALLOC |
                                         EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
-                                        EXTENT_DEFRAG, 0, &cached_state);
+                                        EXTENT_DEFRAG, &cached_state);
 
                spin_lock_irq(&inode->ordered_tree.lock);
                set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
                                             cur - ordered->file_offset);
                spin_unlock_irq(&inode->ordered_tree.lock);
 
+               /*
+                * If the ordered extent has finished, we are safe to delete
+                * all the extent states in this range. Otherwise
+                * btrfs_finish_ordered_io() may still be run by the endio of
+                * other pages, so the extent states must be kept.
+                */
                if (btrfs_dec_test_ordered_pending(inode, &ordered,
                                                   cur, range_end + 1 - cur)) {
                        btrfs_finish_ordered_io(ordered);
                         * The ordered extent has finished, now we're again
                         * safe to delete all extent states of the range.
                         */
-                       delete_states = true;
-               } else {
-                       /*
-                        * btrfs_finish_ordered_io() will get executed by endio
-                        * of other pages, thus we can't delete extent states
-                        * anymore
-                        */
-                       delete_states = false;
+                       extra_flags = EXTENT_CLEAR_ALL_BITS;
                }
 next:
                if (ordered)
                if (!inode_evicting) {
                        clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
                                 EXTENT_DELALLOC | EXTENT_UPTODATE |
-                                EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
-                                delete_states, &cached_state);
+                                EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
+                                extra_flags, &cached_state);
                }
                cur = range_end + 1;
        }
         */
        clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
                          EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
-                         EXTENT_DEFRAG, 0, &cached_state);
+                         EXTENT_DEFRAG, &cached_state);
 
        ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
                                        &cached_state);
 
                return ret;
        clear_extent_bit(&inode->io_tree, start, start + len - 1,
                         EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
-                        EXTENT_DEFRAG, 0, cached_state);
+                        EXTENT_DEFRAG, cached_state);
        set_extent_defrag(&inode->io_tree, start, start + len - 1, cached_state);
 
        /* Update the page status */
 
 
        clear_extent_bit(&inode->io_tree, file_offset, range_end,
                         EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
-                        0, NULL);
+                        NULL);
        ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
        if (ret)
                goto out_unlock;
 
                               BTRFS_MAX_EXTENT_SIZE >> 1,
                               (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
                               EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
-                              EXTENT_UPTODATE, 0, NULL);
+                              EXTENT_UPTODATE, NULL);
        if (ret) {
                test_err("clear_extent_bit returned %d", ret);
                goto out;
                               BTRFS_MAX_EXTENT_SIZE + sectorsize,
                               BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
                               EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
-                              EXTENT_UPTODATE, 0, NULL);
+                              EXTENT_UPTODATE, NULL);
        if (ret) {
                test_err("clear_extent_bit returned %d", ret);
                goto out;
        /* Empty */
        ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
                               EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
-                              EXTENT_UPTODATE, 0, NULL);
+                              EXTENT_UPTODATE, NULL);
        if (ret) {
                test_err("clear_extent_bit returned %d", ret);
                goto out;
        if (ret)
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
                                 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
-                                EXTENT_UPTODATE, 0, NULL);
+                                EXTENT_UPTODATE, NULL);
        iput(inode);
        btrfs_free_dummy_root(root);
        btrfs_free_dummy_fs_info(fs_info);
 
                 * it's safe to do it (through extent_io_tree_release()).
                 */
                err = clear_extent_bit(dirty_pages, start, end,
-                                      EXTENT_NEED_WAIT, 0, &cached_state);
+                                      EXTENT_NEED_WAIT, &cached_state);
                if (err == -ENOMEM)
                        err = 0;
                if (!err)