}
 
                page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
-               lock_extent(tree, cur, page_end, NULL);
+               btrfs_lock_extent(tree, cur, page_end, NULL);
                read_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
                read_unlock(&em_tree->lock);
                    (extent_map_block_start(em) >> SECTOR_SHIFT) !=
                    orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
-                       unlock_extent(tree, cur, page_end, NULL);
+                       btrfs_unlock_extent(tree, cur, page_end, NULL);
                        folio_unlock(folio);
                        folio_put(folio);
                        break;
                }
                add_size = min(em->start + em->len, page_end + 1) - cur;
                free_extent_map(em);
-               unlock_extent(tree, cur, page_end, NULL);
+               btrfs_unlock_extent(tree, cur, page_end, NULL);
 
                if (folio_contains(folio, end_index)) {
                        size_t zero_offset = offset_in_folio(folio, isize);
 
 
                /* Get the big lock and read metadata off disk. */
                if (!locked)
-                       lock_extent(io_tree, start, end, &cached);
+                       btrfs_lock_extent(io_tree, start, end, &cached);
                em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
                if (!locked)
-                       unlock_extent(io_tree, start, end, &cached);
+                       btrfs_unlock_extent(io_tree, start, end, &cached);
 
                if (IS_ERR(em))
                        return NULL;
        while (1) {
                struct btrfs_ordered_extent *ordered;
 
-               lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+               btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
                ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
-               unlock_extent(&inode->io_tree, page_start, page_end,
-                             &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
+                                   &cached_state);
                if (!ordered)
                        break;
 
                folio_wait_writeback(folios[i]);
 
        /* Lock the pages range */
-       lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
-                   (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-                   &cached_state);
+       btrfs_lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+                         (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+                         &cached_state);
        /*
         * Now we have a consistent view about the extent map, re-check
         * which range really needs to be defragged.
                kfree(entry);
        }
 unlock_extent:
-       unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
-                     (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-                     &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+                           (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+                           &cached_state);
 free_folios:
        for (i = 0; i < nr_pages; i++) {
                folio_unlock(folios[i]);
 
 
        while (1) {
                if (nowait) {
-                       if (!try_lock_extent(io_tree, lockstart, lockend,
-                                            cached_state)) {
+                       if (!btrfs_try_lock_extent(io_tree, lockstart, lockend,
+                                                  cached_state)) {
                                ret = -EAGAIN;
                                break;
                        }
                } else {
-                       lock_extent(io_tree, lockstart, lockend, cached_state);
+                       btrfs_lock_extent(io_tree, lockstart, lockend, cached_state);
                }
                /*
                 * We're concerned with the entire range that we're going to be
                                                         lockstart, lockend)))
                        break;
 
-               unlock_extent(io_tree, lockstart, lockend, cached_state);
+               btrfs_unlock_extent(io_tree, lockstart, lockend, cached_state);
 
                if (ordered) {
                        if (nowait) {
 
 bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
                       struct extent_state **cached);
 
-static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-                             struct extent_state **cached)
+static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+                                   struct extent_state **cached)
 {
        return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
 }
 
-static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
-                                  u64 end, struct extent_state **cached)
+static inline bool btrfs_try_lock_extent(struct extent_io_tree *tree, u64 start,
+                                        u64 end, struct extent_state **cached)
 {
        return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
 }
        return __clear_extent_bit(tree, start, end, bits, cached, NULL);
 }
 
-static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-                               struct extent_state **cached)
+static inline int btrfs_unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+                                     struct extent_state **cached)
 {
        return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
 }
 
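For reference, the renamed helpers keep the existing semantics; below is a minimal usage sketch. Only the btrfs_lock_extent()/btrfs_unlock_extent() names and the struct extent_state cached-state argument come from this patch, the example function itself is hypothetical and not part of the change.

/*
 * Illustrative only, not part of the patch: a caller locks a byte range in
 * the io tree, does its work, and unlocks with the same cached state so the
 * final clear can reuse it instead of searching the tree again.
 */
static void example_locked_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	btrfs_lock_extent(tree, start, end, &cached);
	/* ... operate on [start, end] while the range is locked ... */
	btrfs_unlock_extent(tree, start, end, &cached);
}
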
        }
 
        /* step three, lock the state bits for the whole range */
-       lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
+       btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 
        /* then test to make sure it is all still delalloc */
        ret = test_range_bit(tree, delalloc_start, delalloc_end,
                             EXTENT_DELALLOC, cached_state);
 
-       unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
+       btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
        if (!ret) {
                unlock_delalloc_folio(inode, locked_folio, delalloc_start,
                                      delalloc_end);
        ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE));
 
 again:
-       lock_extent(&inode->io_tree, start, end, cached_state);
+       btrfs_lock_extent(&inode->io_tree, start, end, cached_state);
        cur_pos = start;
        while (cur_pos < end) {
                struct btrfs_ordered_extent *ordered;
                }
 
                /* Now wait for the OE to finish. */
-               unlock_extent(&inode->io_tree, start, end, cached_state);
+               btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
                btrfs_start_ordered_extent_nowriteback(ordered, start, end + 1 - start);
                btrfs_put_ordered_extent(ordered);
                /* We have unlocked the whole range, restart from the beginning. */
 
        lock_extents_for_read(inode, start, end, &cached_state);
        ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
-       unlock_extent(&inode->io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
 
        free_extent_map(em_cached);
 
                         * We've hit an error during previous delalloc range,
                         * have to cleanup the remaining locked ranges.
                         */
-                       unlock_extent(&inode->io_tree, found_start,
-                                     found_start + found_len - 1, NULL);
+                       btrfs_unlock_extent(&inode->io_tree, found_start,
+                                           found_start + found_len - 1, NULL);
                        unlock_delalloc_folio(&inode->vfs_inode, folio,
                                              found_start,
                                              found_start + found_len - 1);
        while ((folio = readahead_folio(rac)) != NULL)
                btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
 
-       unlock_extent(&inode->io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
 
        if (em_cached)
                free_extent_map(em_cached);
        if (start > end)
                return 0;
 
-       lock_extent(tree, start, end, &cached_state);
+       btrfs_lock_extent(tree, start, end, &cached_state);
        folio_wait_writeback(folio);
 
        /*
         * so here we only need to unlock the extent range to free any
         * existing extent state.
         */
-       unlock_extent(tree, start, end, &cached_state);
+       btrfs_unlock_extent(tree, start, end, &cached_state);
        return 0;
 }
 
 
                goto out_free_pre;
        }
 
-       lock_extent(&inode->io_tree, start, start + len - 1, NULL);
+       btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL);
        write_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (!em) {
 
 out_unlock:
        write_unlock(&em_tree->lock);
-       unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
+       btrfs_unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
        free_extent_map(split_mid);
 out_free_pre:
        free_extent_map(split_pre);
 
        range_end = round_up(start + len, sectorsize);
        prev_extent_end = range_start;
 
-       lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
 
        ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
        if (ret < 0)
        }
 
 out_unlock:
-       unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
 
        if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
                btrfs_release_path(path);
 
                struct btrfs_ordered_extent *ordered;
 
                if (nowait) {
-                       if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
-                                            cached_state)) {
+                       if (!btrfs_try_lock_extent(&inode->io_tree, start_pos,
+                                                  last_pos, cached_state)) {
                                folio_unlock(folio);
                                folio_put(folio);
                                return -EAGAIN;
                        }
                } else {
-                       lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
+                       btrfs_lock_extent(&inode->io_tree, start_pos, last_pos,
+                                         cached_state);
                }
 
                ordered = btrfs_lookup_ordered_range(inode, start_pos,
                if (ordered &&
                    ordered->file_offset + ordered->num_bytes > start_pos &&
                    ordered->file_offset <= last_pos) {
-                       unlock_extent(&inode->io_tree, start_pos, last_pos,
-                                     cached_state);
+                       btrfs_unlock_extent(&inode->io_tree, start_pos, last_pos,
+                                           cached_state);
                        folio_unlock(folio);
                        folio_put(folio);
                        btrfs_start_ordered_extent(ordered);
        else
                *write_bytes = min_t(size_t, *write_bytes ,
                                     num_bytes - pos + lockstart);
-       unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 
        return ret;
 }
                /* No copied bytes, unlock, release reserved space and exit. */
                if (copied == 0) {
                        if (extents_locked)
-                               unlock_extent(&inode->io_tree, lockstart, lockend,
-                                             &cached_state);
+                               btrfs_unlock_extent(&inode->io_tree, lockstart, lockend,
+                                                   &cached_state);
                        else
                                free_extent_state(cached_state);
                        btrfs_delalloc_release_extents(inode, reserved_len);
         * to avoid a memory leak.
         */
        if (extents_locked)
-               unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
        else
                free_extent_state(cached_state);
 
        }
        folio_wait_writeback(folio);
 
-       lock_extent(io_tree, page_start, page_end, &cached_state);
+       btrfs_lock_extent(io_tree, page_start, page_end, &cached_state);
        ret2 = set_folio_extent_mapped(folio);
        if (ret2 < 0) {
                ret = vmf_error(ret2);
-               unlock_extent(io_tree, page_start, page_end, &cached_state);
+               btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
                goto out_unlock;
        }
 
         */
        ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, fsize);
        if (ordered) {
-               unlock_extent(io_tree, page_start, page_end, &cached_state);
+               btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
                folio_unlock(folio);
                up_read(&BTRFS_I(inode)->i_mmap_lock);
                btrfs_start_ordered_extent(ordered);
        ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
                                        &cached_state);
        if (ret2) {
-               unlock_extent(io_tree, page_start, page_end, &cached_state);
+               btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
                ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }
 
        btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
 
-       unlock_extent(io_tree, page_start, page_end, &cached_state);
+       btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
        up_read(&BTRFS_I(inode)->i_mmap_lock);
 
        btrfs_delalloc_release_extents(BTRFS_I(inode), fsize);
        while (1) {
                truncate_pagecache_range(inode, lockstart, lockend);
 
-               lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                           cached_state);
+               btrfs_lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                 cached_state);
                /*
                 * We can't have ordered extents in the range, nor dirty/writeback
                 * pages, because we have locked the inode's VFS lock in exclusive
                if (!check_range_has_page(inode, lockstart, lockend))
                        break;
 
-               unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                             cached_state);
+               btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                   cached_state);
        }
 
        btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
        btrfs_end_transaction(trans);
        btrfs_btree_balance_dirty(fs_info);
 out:
-       unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                     &cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                           &cached_state);
 out_only_mutex:
        if (!updated_inode && truncated_block && !ret) {
                /*
                ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
                                                alloc_start, bytes_to_reserve);
                if (ret) {
-                       unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
-                                     lockend, &cached_state);
+                       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
+                                           lockend, &cached_state);
                        goto out;
                }
                ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
                                                alloc_end - alloc_start,
                                                fs_info->sectorsize,
                                                offset + len, &alloc_hint);
-               unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                             &cached_state);
+               btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                   &cached_state);
                /* btrfs_prealloc_file_range releases reserved space on error */
                if (ret) {
                        space_reserved = false;
        }
 
        locked_end = alloc_end - 1;
-       lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-                   &cached_state);
+       btrfs_lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+                         &cached_state);
 
        btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
 
         */
        ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
 out_unlock:
-       unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-                     &cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+                           &cached_state);
 out:
        btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
        extent_changeset_free(data_reserved);
 
        last_extent_end = lockstart;
 
-       lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0) {
        }
 
 out:
-       unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
        btrfs_free_path(path);
 
        if (ret < 0)
 
        btrfs_i_size_write(inode, 0);
        truncate_pagecache(vfs_inode, 0);
 
-       lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
        btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
 
        /*
        inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
        btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
 
-       unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
        if (ret)
                goto fail;
 
                           struct extent_state **cached_state)
 {
        io_ctl_drop_pages(io_ctl);
-       unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-                     cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+                           cached_state);
 }
 
 static int __btrfs_wait_cache_io(struct btrfs_root *root,
        if (ret)
                goto out_unlock;
 
-       lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-                   &cached_state);
+       btrfs_lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+                         &cached_state);
 
        io_ctl_set_generation(io_ctl, trans->transid);
 
        io_ctl_drop_pages(io_ctl);
        io_ctl_free(io_ctl);
 
-       unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-                     &cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+                           &cached_state);
 
        /*
         * at this point the pages are under IO and we're happy,
 
        if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
                return 1;
 
-       lock_extent(&inode->io_tree, offset, end, &cached);
+       btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
        ret = __cow_file_range_inline(inode, size, compressed_size,
                                      compress_type, compressed_folio,
                                      update_i_size);
        if (ret > 0) {
-               unlock_extent(&inode->io_tree, offset, end, &cached);
+               btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
                return ret;
        }
 
                goto done;
        }
 
-       lock_extent(io_tree, start, end, &cached);
+       btrfs_lock_extent(io_tree, start, end, &cached);
 
        /* Here we're doing allocation and writeback of the compressed pages */
        file_extent.disk_bytenr = ins.objectid;
                 * Locked range will be released either during error clean up or
                 * after the whole range is finished.
                 */
-               lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
-                           &cached);
+               btrfs_lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
+                                 &cached);
 
                em = btrfs_create_io_em(inode, start, &file_extent,
                                        BTRFS_ORDERED_REGULAR);
                if (IS_ERR(em)) {
-                       unlock_extent(&inode->io_tree, start,
-                                     start + cur_alloc_size - 1, &cached);
+                       btrfs_unlock_extent(&inode->io_tree, start,
+                                           start + cur_alloc_size - 1, &cached);
                        ret = PTR_ERR(em);
                        goto out_reserve;
                }
                ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
                                                     1 << BTRFS_ORDERED_REGULAR);
                if (IS_ERR(ordered)) {
-                       unlock_extent(&inode->io_tree, start,
-                                     start + cur_alloc_size - 1, &cached);
+                       btrfs_unlock_extent(&inode->io_tree, start,
+                                           start + cur_alloc_size - 1, &cached);
                        ret = PTR_ERR(ordered);
                        goto out_drop_extent_cache;
                }
         * group that contains that extent to RO mode and therefore force COW
         * when starting writeback.
         */
-       lock_extent(io_tree, start, end, &cached_state);
+       btrfs_lock_extent(io_tree, start, end, &cached_state);
        count = count_range_bits(io_tree, &range_start, end, range_bytes,
                                 EXTENT_NORESERVE, 0, NULL);
        if (count > 0 || is_space_ino || is_reloc_ino) {
                if (count > 0)
                        clear_extent_bits(io_tree, start, end, EXTENT_NORESERVE);
        }
-       unlock_extent(io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(io_tree, start, end, &cached_state);
 
        /*
         * Don't try to create inline extents, as a mix of inline extent that
        u64 end = file_pos + len - 1;
        int ret = 0;
 
-       lock_extent(&inode->io_tree, file_pos, end, cached);
+       btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
 
        if (is_prealloc) {
                struct extent_map *em;
                em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
                                        BTRFS_ORDERED_PREALLOC);
                if (IS_ERR(em)) {
-                       unlock_extent(&inode->io_tree, file_pos, end, cached);
+                       btrfs_unlock_extent(&inode->io_tree, file_pos, end, cached);
                        return PTR_ERR(em);
                }
                free_extent_map(em);
        if (IS_ERR(ordered)) {
                if (is_prealloc)
                        btrfs_drop_extent_map_range(inode, file_pos, end, false);
-               unlock_extent(&inode->io_tree, file_pos, end, cached);
+               btrfs_unlock_extent(&inode->io_tree, file_pos, end, cached);
                return PTR_ERR(ordered);
        }
 
        if (cur_offset < end) {
                struct extent_state *cached = NULL;
 
-               lock_extent(&inode->io_tree, cur_offset, end, &cached);
+               btrfs_lock_extent(&inode->io_tree, cur_offset, end, &cached);
                extent_clear_unlock_delalloc(inode, cur_offset, end,
                                             locked_folio, &cached,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
        if (ret)
                goto out_page;
 
-       lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
 
        /* already ordered? We're done */
        if (folio_test_ordered(folio))
 
        ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
        if (ordered) {
-               unlock_extent(&inode->io_tree, page_start, page_end,
-                             &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
+                                   &cached_state);
                folio_unlock(folio);
                btrfs_start_ordered_extent(ordered);
                btrfs_put_ordered_extent(ordered);
        if (free_delalloc_space)
                btrfs_delalloc_release_space(inode, data_reserved, page_start,
                                             PAGE_SIZE, true);
-       unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
 out_page:
        if (ret) {
                /*
 
        folio_wait_writeback(folio);
 
-       lock_extent(io_tree, block_start, block_end, &cached_state);
+       btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);
 
        ordered = btrfs_lookup_ordered_extent(inode, block_start);
        if (ordered) {
-               unlock_extent(io_tree, block_start, block_end, &cached_state);
+               btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
                folio_unlock(folio);
                folio_put(folio);
                btrfs_start_ordered_extent(ordered);
        ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
                                        &cached_state);
        if (ret) {
-               unlock_extent(io_tree, block_start, block_end, &cached_state);
+               btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
                goto out_unlock;
        }
 
                                  block_end + 1 - block_start);
        btrfs_folio_set_dirty(fs_info, folio, block_start,
                              block_end + 1 - block_start);
-       unlock_extent(io_tree, block_start, block_end, &cached_state);
+       btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
 
        if (only_release_metadata)
                set_extent_bit(&inode->io_tree, block_start, block_end,
                        break;
        }
        free_extent_map(em);
-       unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
+       btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
        return ret;
 }
 
                state_flags = state->state;
                spin_unlock(&io_tree->lock);
 
-               lock_extent(io_tree, start, end, &cached_state);
+               btrfs_lock_extent(io_tree, start, end, &cached_state);
 
                /*
                 * If still has DELALLOC flag, the extent didn't reach disk,
        }
 
        if (!inode_evicting)
-               lock_extent(tree, page_start, page_end, &cached_state);
+               btrfs_lock_extent(tree, page_start, page_end, &cached_state);
 
        cur = page_start;
        while (cur < page_end) {
                const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
 
                control.new_size = new_size;
-               lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+               btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
                /*
                 * We want to drop from the next block forward in case this new
                 * size is not block aligned since we will be keeping the last
                inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
                btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
 
-               unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
 
                trans->block_rsv = &fs_info->trans_block_rsv;
                if (ret != -ENOSPC && ret != -EAGAIN)
 
        read_extent_buffer(leaf, tmp, ptr, count);
        btrfs_release_path(path);
-       unlock_extent(io_tree, start, lockend, cached_state);
+       btrfs_unlock_extent(io_tree, start, lockend, cached_state);
        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
        *unlocked = true;
 
        if (ret)
                goto out;
 
-       unlock_extent(io_tree, start, lockend, cached_state);
+       btrfs_unlock_extent(io_tree, start, lockend, cached_state);
        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
        *unlocked = true;
 
                        goto out_unlock_inode;
                }
 
-               if (!try_lock_extent(io_tree, start, lockend, cached_state)) {
+               if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
                        ret = -EAGAIN;
                        goto out_unlock_inode;
                }
                                                     lockend - start + 1);
                if (ordered) {
                        btrfs_put_ordered_extent(ordered);
-                       unlock_extent(io_tree, start, lockend, cached_state);
+                       btrfs_unlock_extent(io_tree, start, lockend, cached_state);
                        ret = -EAGAIN;
                        goto out_unlock_inode;
                }
                        if (ret)
                                goto out_unlock_inode;
 
-                       lock_extent(io_tree, start, lockend, cached_state);
+                       btrfs_lock_extent(io_tree, start, lockend, cached_state);
                        ordered = btrfs_lookup_ordered_range(inode, start,
                                                             lockend - start + 1);
                        if (!ordered)
                                break;
                        btrfs_put_ordered_extent(ordered);
-                       unlock_extent(io_tree, start, lockend, cached_state);
+                       btrfs_unlock_extent(io_tree, start, lockend, cached_state);
                        cond_resched();
                }
        }
        em = NULL;
 
        if (*disk_bytenr == EXTENT_MAP_HOLE) {
-               unlock_extent(io_tree, start, lockend, cached_state);
+               btrfs_unlock_extent(io_tree, start, lockend, cached_state);
                btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
                unlocked = true;
                ret = iov_iter_zero(count, iter);
 out_unlock_extent:
        /* Leave inode and extent locked if we need to do a read. */
        if (!unlocked && ret != -EIOCBQUEUED)
-               unlock_extent(io_tree, start, lockend, cached_state);
+               btrfs_unlock_extent(io_tree, start, lockend, cached_state);
 out_unlock_inode:
        if (!unlocked && ret != -EIOCBQUEUED)
                btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
                                                    end >> PAGE_SHIFT);
                if (ret)
                        goto out_folios;
-               lock_extent(io_tree, start, end, &cached_state);
+               btrfs_lock_extent(io_tree, start, end, &cached_state);
                ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
                if (!ordered &&
                    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
                        break;
                if (ordered)
                        btrfs_put_ordered_extent(ordered);
-               unlock_extent(io_tree, start, end, &cached_state);
+               btrfs_unlock_extent(io_tree, start, end, &cached_state);
                cond_resched();
        }
 
        if (start + encoded->len > inode->vfs_inode.i_size)
                i_size_write(&inode->vfs_inode, start + encoded->len);
 
-       unlock_extent(io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(io_tree, start, end, &cached_state);
 
        btrfs_delalloc_release_extents(inode, num_bytes);
 
        if (!extent_reserved)
                btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
 out_unlock:
-       unlock_extent(io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(io_tree, start, end, &cached_state);
 out_folios:
        for (i = 0; i < nr_folios; i++) {
                if (folios[i])
 
        isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
 
-       lock_extent(io_tree, 0, isize - 1, &cached_state);
+       btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
        while (prev_extent_end < isize) {
                struct btrfs_key key;
                struct extent_buffer *leaf;
        if (!IS_ERR_OR_NULL(map))
                btrfs_free_chunk_map(map);
 
-       unlock_extent(io_tree, 0, isize - 1, &cached_state);
+       btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);
 
        if (ret)
                btrfs_swap_deactivate(file);
 
                                                 args.compression, &unlocked);
 
                if (!unlocked) {
-                       unlock_extent(io_tree, start, lockend, &cached_state);
+                       btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
                        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
                }
        }
        ret = priv->count;
 
 out:
-       unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
+       btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 
        io_uring_cmd_done(cmd, ret, 0, issue_flags);
        return -EIOCBQUEUED;
 
 out_fail:
-       unlock_extent(io_tree, start, lockend, &cached_state);
+       btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
        kfree(priv);
        return ret;
                         (const char *)&data->args + copy_end_kernel,
                         sizeof(data->args) - copy_end_kernel)) {
                if (ret == -EIOCBQUEUED) {
-                       unlock_extent(io_tree, start, lockend, &cached_state);
+                       btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
                        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
                }
                ret = -EFAULT;
 
                cachedp = cached_state;
 
        while (1) {
-               lock_extent(&inode->io_tree, start, end, cachedp);
+               btrfs_lock_extent(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
                                refcount_dec(&cache->refs);
                        break;
                }
-               unlock_extent(&inode->io_tree, start, end, cachedp);
+               btrfs_unlock_extent(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(ordered);
                btrfs_put_ordered_extent(ordered);
        }
 {
        struct btrfs_ordered_extent *ordered;
 
-       if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
+       if (!btrfs_try_lock_extent(&inode->io_tree, start, end, cached_state))
                return false;
 
        ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
                return true;
 
        btrfs_put_ordered_extent(ordered);
-       unlock_extent(&inode->io_tree, start, end, cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
 
        return false;
 }
 
         * because we have already locked the inode's i_mmap_lock in exclusive
         * mode.
         */
-       lock_extent(&dst->io_tree, dst_loff, end, &cached_state);
+       btrfs_lock_extent(&dst->io_tree, dst_loff, end, &cached_state);
        ret = btrfs_clone(&src->vfs_inode, &dst->vfs_inode, loff, len,
                          ALIGN(len, bs), dst_loff, 1);
-       unlock_extent(&dst->io_tree, dst_loff, end, &cached_state);
+       btrfs_unlock_extent(&dst->io_tree, dst_loff, end, &cached_state);
 
        btrfs_btree_balance_dirty(fs_info);
 
         * mode.
         */
        end = destoff + len - 1;
-       lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
+       btrfs_lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
        ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
-       unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
 
        /*
         * We may have copied an inline extent into a page of the destination
 
                                /* Take mmap lock to serialize with reflinks. */
                                if (!down_read_trylock(&inode->i_mmap_lock))
                                        continue;
-                               ret = try_lock_extent(&inode->io_tree, key.offset,
-                                                     end, &cached_state);
+                               ret = btrfs_try_lock_extent(&inode->io_tree, key.offset,
+                                                           end, &cached_state);
                                if (!ret) {
                                        up_read(&inode->i_mmap_lock);
                                        continue;
                                }
 
                                btrfs_drop_extent_map_range(inode, key.offset, end, true);
-                               unlock_extent(&inode->io_tree, key.offset, end,
-                                             &cached_state);
+                               btrfs_unlock_extent(&inode->io_tree, key.offset, end,
+                                                   &cached_state);
                                up_read(&inode->i_mmap_lock);
                        }
                }
                }
 
-               /* the lock_extent waits for read_folio to complete */
+               /* btrfs_lock_extent() waits for read_folio() to complete */
-               lock_extent(&inode->io_tree, start, end, &cached_state);
+               btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
                btrfs_drop_extent_map_range(inode, start, end, true);
-               unlock_extent(&inode->io_tree, start, end, &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
        }
        return 0;
 }
                else
                        end = cluster->end - offset;
 
-               lock_extent(&inode->io_tree, start, end, &cached_state);
+               btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
                num_bytes = end + 1 - start;
                ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
                                                num_bytes, num_bytes,
                                                end + 1, &alloc_hint);
                cur_offset = end + 1;
-               unlock_extent(&inode->io_tree, start, end, &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
                if (ret)
                        break;
        }
        em->ram_bytes = em->len;
        em->flags |= EXTENT_FLAG_PINNED;
 
-       lock_extent(&inode->io_tree, start, end, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
        ret = btrfs_replace_extent_map_range(inode, em, false);
-       unlock_extent(&inode->io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
        free_extent_map(em);
 
        return ret;
                        goto release_folio;
 
                /* Mark the range delalloc and dirty for later writeback */
-               lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
-                           &cached_state);
+               btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start,
+                                 clamped_end, &cached_state);
                ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
                                                clamped_end, 0, &cached_state);
                if (ret) {
                                       boundary_start, boundary_end,
                                       EXTENT_BOUNDARY, NULL);
                }
-               unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
-                             &cached_state);
+               btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
+                                   &cached_state);
                btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
                cur += clamped_len;
 
 
                        sectorsize - 1, start, end);
                goto out_bits;
        }
-       unlock_extent(tmp, start, end, NULL);
+       btrfs_unlock_extent(tmp, start, end, NULL);
        unlock_page(locked_page);
        put_page(locked_page);
 
                test_err("there were unlocked pages in the range");
                goto out_bits;
        }
-       unlock_extent(tmp, start, end, NULL);
+       btrfs_unlock_extent(tmp, start, end, NULL);
        /* locked_page was unlocked above */
        put_page(locked_page);
 
                test_err("pages in range were not all locked");
                goto out_bits;
        }
-       unlock_extent(tmp, start, end, NULL);
+       btrfs_unlock_extent(tmp, start, end, NULL);
 
        /*
         * Now to test where we run into a page that is no longer dirty in the
 
         * file which happens to refer to the same extent as well. Such races
         * can leave checksum items in the log with overlapping ranges.
         */
-       ret = lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
-                         &cached_state);
+       ret = btrfs_lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+                               &cached_state);
        if (ret)
                return ret;
        /*
        if (!ret)
                ret = btrfs_csum_file_blocks(trans, log_root, sums);
 
-       unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
-                     &cached_state);
+       btrfs_unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+                           &cached_state);
 
        return ret;
 }