        struct lockdep_map btrfs_trans_num_extwriters_map;
        struct lockdep_map btrfs_state_change_map[4];
        struct lockdep_map btrfs_trans_pending_ordered_map;
+       struct lockdep_map btrfs_ordered_extent_map;
 
 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
        spinlock_t ref_verify_lock;
 
        btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
        btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
        btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
+       btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
        btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_start,
                                     BTRFS_LOCKDEP_TRANS_COMMIT_START);
        btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
 
                clear_bits |= EXTENT_DELALLOC_NEW;
 
        freespace_inode = btrfs_is_free_space_inode(inode);
+       if (!freespace_inode)
+               btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
 
        if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
                ret = -EIO;
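
Note the asymmetry above: btrfs_finish_ordered_io() read-acquires the map but
never releases it; the release happens in btrfs_remove_ordered_extent() further
down, once the ordered extent is gone from the tree and waiters have been
woken. A minimal sketch of that protocol with the raw lockdep primitives (demo
names, not btrfs code; needs <linux/lockdep.h>):

static struct lockdep_map demo_map;     /* registered via lockdep_init_map() */

static void completer(void)
{
        /* Held shared across the whole completion; released only after
         * the event the waiter sleeps on has been signalled. */
        rwsem_acquire_read(&demo_map, 0, 0, _THIS_IP_);
        /* ... do the work, wake the waiters ... */
        rwsem_release(&demo_map, _THIS_IP_);
}

static void waiter(void)
{
        /* "About to block until all read holders are done": a momentary
         * exclusive acquisition models exactly that for lockdep. */
        rwsem_acquire(&demo_map, 0, 0, _THIS_IP_);
        rwsem_release(&demo_map, _THIS_IP_);
        /* wait_event(...); */
}

The following hunk is btrfs_destroy_inode()'s cleanup loop, the other acquire
site.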
        struct btrfs_ordered_extent *ordered;
        struct btrfs_inode *inode = BTRFS_I(vfs_inode);
        struct btrfs_root *root = inode->root;
+       bool freespace_inode;
 
        WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
        WARN_ON(vfs_inode->i_data.nrpages);
        if (!root)
                return;
 
+       /*
+        * If this is a free space inode do not take the ordered extents lockdep
+        * map.
+        */
+       freespace_inode = btrfs_is_free_space_inode(inode);
+
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
                if (!ordered)
                        break;
                else {
                        btrfs_err(root->fs_info,
                                  "found ordered extent %llu %llu on inode cleanup",
                                  ordered->file_offset, ordered->num_bytes);
+
+                       if (!freespace_inode)
+                               btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
+
                        btrfs_remove_ordered_extent(inode, ordered);
                        btrfs_put_ordered_extent(ordered);
                        btrfs_put_ordered_extent(ordered);
                }
        }
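
Two details in the cleanup loop above deserve a note. The acquire must come
before btrfs_remove_ordered_extent(), because that function is where the map is
released. And the double put is intentional; my reading of the reference counts
(an inference, not spelled out in the patch):

        btrfs_remove_ordered_extent(inode, ordered);    /* releases the map */
        btrfs_put_ordered_extent(ordered);      /* ref taken by the lookup */
        btrfs_put_ordered_extent(ordered);      /* base ref: the completion path
                                                 * that would normally drop it
                                                 * never runs during teardown */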
 
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;
        bool pending;
+       bool freespace_inode;
+
+       /*
+        * If this is a free space inode the thread has not acquired the ordered
+        * extents lockdep map.
+        */
+       freespace_inode = btrfs_is_free_space_inode(btrfs_inode);
 
        btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
        /* This is paired with btrfs_add_ordered_extent. */
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
+       if (!freespace_inode)
+               btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
 }
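
btrfs_remove_ordered_extent() is now the single release point for a map
acquired in two different callers, which is why it recomputes freespace_inode
itself instead of trusting callers. If a third site ever appears, a pair of
wrappers (hypothetical, not part of this patch) would keep the acquire and
release conditions from drifting apart:

/* Hypothetical helpers, not in this patch: centralize the free space
 * inode check so acquire/release sites cannot diverge. */
static inline void btrfs_ordered_lockdep_acquire(struct btrfs_inode *inode)
{
        if (!btrfs_is_free_space_inode(inode))
                btrfs_lockdep_acquire(inode->root->fs_info, btrfs_ordered_extent);
}

static inline void btrfs_ordered_lockdep_release(struct btrfs_inode *inode)
{
        if (!btrfs_is_free_space_inode(inode))
                btrfs_lockdep_release(inode->root->fs_info, btrfs_ordered_extent);
}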
 
 static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
        u64 start = entry->file_offset;
        u64 end = start + entry->num_bytes - 1;
        struct btrfs_inode *inode = BTRFS_I(entry->inode);
+       bool freespace_inode;
 
        trace_btrfs_ordered_extent_start(inode, entry);
 
+       /*
+        * If this is a free space inode do not take the ordered extents lockdep
+        * map.
+        */
+       freespace_inode = btrfs_is_free_space_inode(inode);
+
        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
        if (wait) {
+               if (!freespace_inode)
+                       btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
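
This wait is where the annotation pays off: a thread that reaches the
wait_event() while holding a lock that a completer can also take would
previously hang rarely and silently; lockdep can now report the inversion the
first time it sees both orderings. Schematically, with a hypothetical lock L:

/*
 * Completer thread                     Waiter thread
 * ----------------                     -------------
 * btrfs_lockdep_acquire(fs, map)       lock(L)
 * lock(L)                              btrfs_start_ordered_extent(entry, 1)
 *                                        btrfs_might_wait_for_event(fs, map)
 *
 * The completer records map -> L, the waiter records L -> map; lockdep
 * flags the circular dependency instead of leaving a rare hang.
 */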