www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Btrfs: refactor caching_thread()
author Omar Sandoval <osandov@fb.com>
Wed, 30 Sep 2015 03:50:33 +0000 (20:50 -0700)
committer Shan Hai <shan.hai@oracle.com>
Wed, 23 Aug 2017 05:26:50 +0000 (13:26 +0800)
We're also going to load the free space tree from caching_thread(), so
we should refactor some of the common code.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
Orabug: 26274676

(cherry picked from commit 73fa48b674e819098c3bafc47618d0e2868191e5)
Signed-off-by: Shan Hai <shan.hai@oracle.com>
Reviewed-by: Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 5d69a128a723efda9c71e8e1f5fe21bcecd47527..d3a316b59ae530977600589ae96920f8b3368e95 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1262,6 +1262,9 @@ struct btrfs_caching_control {
        atomic_t count;
 };
 
+/* Once caching_thread() finds this much free space, it will wake up waiters. */
+#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)
+
 struct btrfs_io_ctl {
        void *cur, *orig;
        struct page *page;
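
For context on the "waiters" in the new comment: the consumer of this wakeup is the extent-tree.c helper that blocks until a block group has cached enough free space. A rough sketch, modeled on btrfs's wait_block_group_cache_progress() (not part of this diff; the exact body may differ between trees):

/*
 * Sketch only: the kind of waiter that CACHING_CTL_WAKE_UP is meant to
 * unblock.  Modeled on wait_block_group_cache_progress(); not part of
 * this commit.
 */
static noinline void
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
                                u64 num_bytes)
{
        struct btrfs_caching_control *caching_ctl;

        caching_ctl = get_caching_control(cache);
        if (!caching_ctl)
                return;

        /* Woken by caching_thread() each time it finds ~2 MiB of free space. */
        wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
                   (cache->free_space_ctl->free_space >= num_bytes));

        put_caching_control(caching_ctl);
}
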
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 16ac710ab8da9ccb9aa8cf96a76d69ecbbd7f6e6..cc4942445615c0fc65664452620746156fd5e271 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -376,11 +376,10 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
        return total_added;
 }
 
-static noinline void caching_thread(struct btrfs_work *work)
+static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 {
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
-       struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
@@ -388,16 +387,15 @@ static noinline void caching_thread(struct btrfs_work *work)
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
-       int ret = -ENOMEM;
+       int ret;
 
-       caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;
 
        path = btrfs_alloc_path();
        if (!path)
-               goto out;
+               return -ENOMEM;
 
        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
@@ -414,15 +412,11 @@ static noinline void caching_thread(struct btrfs_work *work)
        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
-again:
-       mutex_lock(&caching_ctl->mutex);
-       /* need to make sure the commit_root doesn't disappear */
-       down_read(&fs_info->commit_root_sem);
 
 next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
-               goto err;
+               goto out;
 
        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
@@ -447,12 +441,14 @@ next:
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
-                               goto again;
+                               mutex_lock(&caching_ctl->mutex);
+                               down_read(&fs_info->commit_root_sem);
+                               goto next;
                        }
 
                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
-                               goto err;
+                               goto out;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
@@ -490,7 +486,7 @@ next:
                        else
                                last = key.objectid + key.offset;
 
-                       if (total_found > (1024 * 1024 * 2)) {
+                       if (total_found > CACHING_CTL_WAKE_UP) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
@@ -504,25 +500,36 @@ next:
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;
 
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+static noinline void caching_thread(struct btrfs_work *work)
+{
+       struct btrfs_block_group_cache *block_group;
+       struct btrfs_fs_info *fs_info;
+       struct btrfs_caching_control *caching_ctl;
+       int ret;
+
+       caching_ctl = container_of(work, struct btrfs_caching_control, work);
+       block_group = caching_ctl->block_group;
+       fs_info = block_group->fs_info;
+
+       mutex_lock(&caching_ctl->mutex);
+       down_read(&fs_info->commit_root_sem);
+
+       ret = load_extent_tree_free(caching_ctl);
+
        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
-       block_group->cached = BTRFS_CACHE_FINISHED;
+       block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);
 
-err:
-       btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);
-
-       free_excluded_extents(extent_root, block_group);
-
+       free_excluded_extents(fs_info->extent_root, block_group);
        mutex_unlock(&caching_ctl->mutex);
-out:
-       if (ret) {
-               spin_lock(&block_group->lock);
-               block_group->caching_ctl = NULL;
-               block_group->cached = BTRFS_CACHE_ERROR;
-               spin_unlock(&block_group->lock);
-       }
+
        wake_up(&caching_ctl->wait);
 
        put_caching_control(caching_ctl);
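
As the commit message says, the point of splitting load_extent_tree_free() out of caching_thread() is that the free space tree will also be loaded from here. A minimal sketch of the expected dispatch once that follow-up lands; the load_free_space_tree() call and the compat-ro feature check are assumptions based on the commit message, not part of this commit:

/*
 * Sketch only: the expected shape of caching_thread() once the free
 * space tree can also be loaded from it.  load_free_space_tree() and
 * the FREE_SPACE_TREE compat-ro check are assumptions, not part of
 * this commit.
 */
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        int ret;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;

        mutex_lock(&caching_ctl->mutex);
        down_read(&fs_info->commit_root_sem);

        if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
                ret = load_free_space_tree(caching_ctl);  /* hypothetical here */
        else
                ret = load_extent_tree_free(caching_ctl);

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

        up_read(&fs_info->commit_root_sem);
        free_excluded_extents(fs_info->extent_root, block_group);
        mutex_unlock(&caching_ctl->mutex);

        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

Either loader fills the block group's free space cache; after this refactor, caching_thread() itself only takes the locks, records success or failure in block_group->cached, and wakes up waiters.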