                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
-                               if (wakeup)
-                                       caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
-
-                       if (wakeup)
-                               caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }
 
        total_found += add_new_free_space(block_group, last,
                                block_group->start + block_group->length);
-       caching_ctl->progress = (u64)-1;
 
 out:
        btrfs_free_path(path);
        }
 #endif
 
-       caching_ctl->progress = (u64)-1;
-
        up_read(&fs_info->commit_root_sem);
        btrfs_free_excluded_extents(block_group);
        mutex_unlock(&caching_ctl->mutex);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
-       caching_ctl->progress = cache->start;
        refcount_set(&caching_ctl->count, 2);
        btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
 
                /* Should not have any excluded extents. Just in case, though. */
                btrfs_free_excluded_extents(cache);
        } else if (cache->length == cache->used) {
-               cache->last_byte_to_unpin = (u64)-1;
                cache->cached = BTRFS_CACHE_FINISHED;
                btrfs_free_excluded_extents(cache);
        } else if (cache->used == 0) {
-               cache->last_byte_to_unpin = (u64)-1;
                cache->cached = BTRFS_CACHE_FINISHED;
                add_new_free_space(cache, cache->start,
                                   cache->start + cache->length);
                /* Fill dummy cache as FULL */
                bg->length = em->len;
                bg->flags = map->type;
-               bg->last_byte_to_unpin = (u64)-1;
                bg->cached = BTRFS_CACHE_FINISHED;
                bg->used = em->len;
                bg->flags = map->type;
        set_free_space_tree_thresholds(cache);
        cache->used = bytes_used;
        cache->flags = type;
-       cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
        cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
 
 
        wait_queue_head_t wait;
        struct btrfs_work work;
        struct btrfs_block_group *block_group;
-       u64 progress;
        refcount_t count;
 };
 
        /* Cache tracking stuff */
        int cached;
        struct btrfs_caching_control *caching_ctl;
-       u64 last_byte_to_unpin;
 
        struct btrfs_space_info *space_info;
 
 
                len = cache->start + cache->length - start;
                len = min(len, end + 1 - start);
 
-               down_read(&fs_info->commit_root_sem);
-               if (start < cache->last_byte_to_unpin && return_free_space) {
-                       u64 add_len = min(len, cache->last_byte_to_unpin - start);
-
-                       btrfs_add_free_space(cache, start, add_len);
-               }
-               up_read(&fs_info->commit_root_sem);
+               if (return_free_space)
+                       btrfs_add_free_space(cache, start, len);
 
                start += len;
                total_unpinned += len;
 
                ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
                ASSERT(key.objectid < end && key.objectid + key.offset <= end);
 
-               caching_ctl->progress = key.objectid;
-
                offset = key.objectid;
                while (offset < key.objectid + key.offset) {
                        bit = free_space_test_bit(block_group, path, offset);
                goto out;
        }
 
-       caching_ctl->progress = (u64)-1;
-
        ret = 0;
 out:
        return ret;
                ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
                ASSERT(key.objectid < end && key.objectid + key.offset <= end);
 
-               caching_ctl->progress = key.objectid;
-
                total_found += add_new_free_space(block_group, key.objectid,
                                                  key.objectid + key.offset);
                if (total_found > CACHING_CTL_WAKE_UP) {
                goto out;
        }
 
-       caching_ctl->progress = (u64)-1;
-
        ret = 0;
 out:
        return ret;
 
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root, *tmp;
-       struct btrfs_caching_control *caching_ctl, *next;
 
        /*
         * At this point no one can be using this transaction to modify any tree
        }
        spin_unlock(&cur_trans->dropped_roots_lock);
 
-       /*
-        * We have to update the last_byte_to_unpin under the commit_root_sem,
-        * at the same time we swap out the commit roots.
-        *
-        * This is because we must have a real view of the last spot the caching
-        * kthreads were while caching.  Consider the following views of the
-        * extent tree for a block group
-        *
-        * commit root
-        * +----+----+----+----+----+----+----+
-        * |\\\\|    |\\\\|\\\\|    |\\\\|\\\\|
-        * +----+----+----+----+----+----+----+
-        * 0    1    2    3    4    5    6    7
-        *
-        * new commit root
-        * +----+----+----+----+----+----+----+
-        * |    |    |    |\\\\|    |    |\\\\|
-        * +----+----+----+----+----+----+----+
-        * 0    1    2    3    4    5    6    7
-        *
-        * If the cache_ctl->progress was at 3, then we are only allowed to
-        * unpin [0,1) and [2,3], because the caching thread has already
-        * processed those extents.  We are not allowed to unpin [5,6), because
-        * the caching thread will re-start it's search from 3, and thus find
-        * the hole from [4,6) to add to the free space cache.
-        */
-       write_lock(&fs_info->block_group_cache_lock);
-       list_for_each_entry_safe(caching_ctl, next,
-                                &fs_info->caching_block_groups, list) {
-               struct btrfs_block_group *cache = caching_ctl->block_group;
-
-               if (btrfs_block_group_done(cache)) {
-                       cache->last_byte_to_unpin = (u64)-1;
-                       list_del_init(&caching_ctl->list);
-                       btrfs_put_caching_control(caching_ctl);
-               } else {
-                       cache->last_byte_to_unpin = caching_ctl->progress;
-               }
-       }
-       write_unlock(&fs_info->block_group_cache_lock);
        up_write(&fs_info->commit_root_sem);
 }
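For reference, the rule spelled out in the removed comment above is easy to state in isolation: under the old scheme, unpin_extent_range() could only hand back to the free space cache the part of a pinned range that lay below the caching thread's snapshotted position, last_byte_to_unpin. The sketch below is illustrative only, a standalone user-space helper rather than kernel code; the parameter names mirror the removed fields purely for readability.

```c
/* Illustrative sketch only -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/*
 * Old behaviour: of a pinned range [start, start + len), only the part
 * below the caching thread's snapshotted position (last_byte_to_unpin)
 * could be returned to the free space cache.  The caching thread would
 * still restart its scan from that position and add anything beyond it
 * as free space itself, so unpinning past that point would have created
 * duplicate free space entries.
 */
static u64 unpinnable_len(u64 start, u64 len, u64 last_byte_to_unpin)
{
	if (start >= last_byte_to_unpin)
		return 0;
	return len < last_byte_to_unpin - start ?
	       len : last_byte_to_unpin - start;
}

int main(void)
{
	/* Progress snapshotted at 3, as in the removed comment's example. */
	/* [2,4): only the byte below 3 may be unpinned now. */
	printf("%llu\n", (unsigned long long)unpinnable_len(2, 2, 3));
	/* [5,6): entirely above the snapshot, must stay pinned. */
	printf("%llu\n", (unsigned long long)unpinnable_len(5, 1, 3));
	return 0;
}
```

The patch drops this clamp entirely: unpin_extent_range() now calls btrfs_add_free_space() for the whole range whenever return_free_space is set, which is why both caching_ctl->progress and last_byte_to_unpin can be removed along with the bookkeeping in switch_commit_roots().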
 
 
        free = cache->zone_capacity - cache->alloc_offset;
 
        /* We only need ->free_space in ALLOC_SEQ block groups */
-       cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
        cache->free_space_ctl->free_space = free;
        cache->zone_unusable = unusable;