It would be better to use an atomic variable for total_ext_tree.
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
        si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
        si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
        si->total_ext = atomic64_read(&sbi->total_hit_ext);
-       si->ext_tree = sbi->total_ext_tree;
+       si->ext_tree = atomic_read(&sbi->total_ext_tree);
        si->ext_node = atomic_read(&sbi->total_ext_node);
        si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
        si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
        si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
        for (i = 0; i <= UPDATE_INO; i++)
                si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
-       si->cache_mem += sbi->total_ext_tree * sizeof(struct extent_tree);
+       si->cache_mem += atomic_read(&sbi->total_ext_tree) *
+                                               sizeof(struct extent_tree);
        si->cache_mem += atomic_read(&sbi->total_ext_node) *
                                                sizeof(struct extent_node);
 
 
                rwlock_init(&et->lock);
                atomic_set(&et->refcount, 0);
                et->count = 0;
-               sbi->total_ext_tree++;
+               atomic_inc(&sbi->total_ext_tree);
        }
        atomic_inc(&et->refcount);
        up_write(&sbi->extent_tree_lock);
 
                                radix_tree_delete(root, et->ino);
                                kmem_cache_free(extent_tree_slab, et);
-                               sbi->total_ext_tree--;
+                               atomic_dec(&sbi->total_ext_tree);
                                tree_cnt++;
 
                                if (node_cnt + tree_cnt >= nr_shrink)
        f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
        radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
        kmem_cache_free(extent_tree_slab, et);
-       sbi->total_ext_tree--;
+       atomic_dec(&sbi->total_ext_tree);
        up_write(&sbi->extent_tree_lock);
 
        F2FS_I(inode)->extent_tree = NULL;
        init_rwsem(&sbi->extent_tree_lock);
        INIT_LIST_HEAD(&sbi->extent_list);
        spin_lock_init(&sbi->extent_lock);
-       sbi->total_ext_tree = 0;
+       atomic_set(&sbi->total_ext_tree, 0);
        atomic_set(&sbi->total_ext_node, 0);
 }
 
 
        struct rw_semaphore extent_tree_lock;   /* locking extent radix tree */
        struct list_head extent_list;           /* lru list for shrinker */
        spinlock_t extent_lock;                 /* locking extent lru list */
-       int total_ext_tree;                     /* extent tree count */
+       atomic_t total_ext_tree;                /* extent tree count */
        atomic_t total_ext_node;                /* extent info count */
 
        /* basic filesystem units */
 
                                sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
        } else if (type == EXTENT_CACHE) {
-               mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) +
+               mem_size = (atomic_read(&sbi->total_ext_tree) *
+                               sizeof(struct extent_tree) +
                                atomic_read(&sbi->total_ext_node) *
                                sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 
 
 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
 {
-       return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
+       return atomic_read(&sbi->total_ext_tree) +
+                               atomic_read(&sbi->total_ext_node);
 }
 
 unsigned long f2fs_shrink_count(struct shrinker *shrink,