                btrfs_space_info_update_bytes_pinned(fs_info, space_info,
                                                     -block_group->pinned);
                space_info->bytes_readonly += block_group->pinned;
-               __btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
                block_group->pinned = 0;
 
                spin_unlock(&block_group->lock);

                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
 
-                       __btrfs_mod_total_bytes_pinned(cache->space_info,
-                                                      num_bytes);
                        set_extent_dirty(&trans->transaction->pinned_extents,
                                         bytenr, bytenr + num_bytes - 1,
                                         GFP_NOFS | __GFP_NOFAIL);
 
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_fs_info *fs_info = trans->fs_info;
-       u64 flags = btrfs_ref_head_to_space_flags(existing);
        int old_ref_mod;
 
        BUG_ON(existing->is_data != update->is_data);
                }
        }
 
-       /*
-        * This handles the following conditions:
-        *
-        * 1. We had a ref mod of 0 or more and went negative, indicating that
-        *    we may be freeing space, so add our space to the
-        *    total_bytes_pinned counter.
-        * 2. We were negative and went to 0 or positive, so no longer can say
-        *    that the space would be pinned, decrement our counter from the
-        *    total_bytes_pinned counter.
-        * 3. We are now at 0 and have ->must_insert_reserved set, which means
-        *    this was a new allocation and then we dropped it, and thus must
-        *    add our space to the total_bytes_pinned counter.
-        */
-       if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
-               btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
-       else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
-               btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
-       else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
-               btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
-
        spin_unlock(&existing->lock);
 }
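
The three transitions the removed comment enumerates are easier to follow with concrete numbers. The sketch below is a standalone userspace model of that if/else chain, not btrfs code: update_pinned() and the plain long long counter are hypothetical stand-ins for the removed logic and its percpu counter.

#include <stdio.h>
#include <stdbool.h>

static long long total_bytes_pinned;	/* stand-in for the percpu counter */

/* Mirrors the removed chain: new_ref_mod plays the role of
 * existing->total_ref_mod after the update has been folded in. */
static void update_pinned(int old_ref_mod, int new_ref_mod,
			  bool must_insert_reserved, long long num_bytes)
{
	if (new_ref_mod < 0 && old_ref_mod >= 0)
		total_bytes_pinned += num_bytes;	/* case 1: may free space */
	else if (new_ref_mod >= 0 && old_ref_mod < 0)
		total_bytes_pinned -= num_bytes;	/* case 2: no longer pinned */
	else if (new_ref_mod == 0 && must_insert_reserved)
		total_bytes_pinned += num_bytes;	/* case 3: new alloc dropped */
}

int main(void)
{
	update_pinned(0, -1, false, 16384);	/* case 1: counter = 16384 */
	update_pinned(-1, 0, false, 16384);	/* case 2: counter = 0 */
	update_pinned(1, 0, true, 16384);	/* case 3: counter = 16384 */
	printf("%lld\n", total_bytes_pinned);	/* prints 16384 */
	return 0;
}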
 
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
-               u64 flags = btrfs_ref_head_to_space_flags(head_ref);
-
                if (head_ref->is_data && head_ref->ref_mod < 0) {
                        delayed_refs->pending_csums += head_ref->num_bytes;
                        trans->delayed_ref_updates +=
                                btrfs_csum_bytes_to_leaves(trans->fs_info,
                                                           head_ref->num_bytes);
                }
-               if (head_ref->ref_mod < 0)
-                       btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
-                                                    head_ref->num_bytes);
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
 
                        cache->space_info->bytes_reserved -= head->num_bytes;
                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
-                       percpu_counter_add_batch(
-                               &cache->space_info->total_bytes_pinned,
-                               head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
 
                        btrfs_put_block_group(cache);
 
 
                nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
        }
 
-       /*
-        * We were dropping refs, or had a new ref and dropped it, and thus must
-        * adjust down our total_bytes_pinned, the space may or may not have
-        * been pinned and so is accounted for properly in the pinned space by
-        * now.
-        */
-       if (head->total_ref_mod < 0 ||
-           (head->total_ref_mod == 0 && head->must_insert_reserved)) {
-               u64 flags = btrfs_ref_head_to_space_flags(head);
-
-               btrfs_mod_total_bytes_pinned(fs_info, flags, -head->num_bytes);
-       }
-
        btrfs_delayed_refs_rsv_release(fs_info, nr_items);
 }
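
Read together with the add_delayed_ref_head hunk above, these removals formed a matched pair: the counter went up when a head was created with a negative ref_mod (or when an update drove total_ref_mod negative), and came back down here in head cleanup once the bytes were reflected in bytes_pinned proper. Piecing the comments in this patch together, a plausible end-to-end sequence for freeing a 16K extent looks like:

	btrfs_free_extent()	head created, ref_mod = -1, counter += 16K
	run delayed refs	extent set dirty in pinned_extents,
				bytes_pinned += 16K
	cleanup_ref_head()	total_ref_mod < 0, counter -= 16K
	transaction commit	extent unpinned, bytes_pinned -= 16K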
 
        spin_unlock(&cache->lock);
        spin_unlock(&cache->space_info->lock);
 
-       __btrfs_mod_total_bytes_pinned(cache->space_info, num_bytes);
        set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
                         bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
        return 0;

                cache->pinned -= len;
                btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
                space_info->max_extent_size = 0;
-               __btrfs_mod_total_bytes_pinned(space_info, -len);
                if (cache->ro) {
                        space_info->bytes_readonly += len;
                        readonly = true;
 
        if (!space_info)
                return -ENOMEM;
 
-       ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
-                                GFP_KERNEL);
-       if (ret) {
-               kfree(space_info);
-               return ret;
-       }
-
        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                INIT_LIST_HEAD(&space_info->block_groups[i]);
        init_rwsem(&space_info->groups_sem);
 
 
        u64 flags;
 
-       /*
-        * bytes_pinned is kept in line with what is actually pinned, as in
-        * we've called update_block_group and dropped the bytes_used counter
-        * and increased the bytes_pinned counter.  However this means that
-        * bytes_pinned does not reflect the bytes that will be pinned once the
-        * delayed refs are flushed, so this counter is inc'ed every time we
-        * call btrfs_free_extent so it is a realtime count of what will be
-        * freed once the transaction is committed.  It will be zeroed every
-        * time the transaction commits.
-        */
-       struct percpu_counter total_bytes_pinned;
-
        struct list_head list;
        /* Protected by the spinlock 'lock'. */
        struct list_head ro_bgs;

 }
 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
                             enum btrfs_reserve_flush_enum flush);
-
-static inline void __btrfs_mod_total_bytes_pinned(
-                                       struct btrfs_space_info *space_info,
-                                       s64 mod)
-{
-       percpu_counter_add_batch(&space_info->total_bytes_pinned, mod,
-                                BTRFS_TOTAL_BYTES_PINNED_BATCH);
-}
-
-static inline void btrfs_mod_total_bytes_pinned(struct btrfs_fs_info *fs_info,
-                                               u64 flags, s64 mod)
-{
-       struct btrfs_space_info *space_info = btrfs_find_space_info(fs_info, flags);
-
-       ASSERT(space_info);
-       __btrfs_mod_total_bytes_pinned(space_info, mod);
-}
-
 #endif /* BTRFS_SPACE_INFO_H */
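
The removed helpers were thin wrappers around the generic percpu counter API from <linux/percpu_counter.h>. Below is a minimal sketch of the lifecycle this counter followed, in kernel context; demo_counter_lifecycle() is illustrative rather than btrfs code, and BTRFS_TOTAL_BYTES_PINNED_BATCH is the batch constant (from btrfs's headers) that the helpers passed through.

#include <linux/errno.h>
#include <linux/percpu_counter.h>

static int demo_counter_lifecycle(void)
{
	struct percpu_counter ctr;
	s64 val;
	int ret;

	/* Allocates the per-CPU slots, so it can fail. */
	ret = percpu_counter_init(&ctr, 0, GFP_KERNEL);
	if (ret)
		return ret;

	/* Hot path: the delta accumulates in this CPU's local slot and only
	 * folds into the shared count, under a spinlock, once it crosses
	 * the batch threshold. */
	percpu_counter_add_batch(&ctr, 16384, BTRFS_TOTAL_BYTES_PINNED_BATCH);

	/* Exact read: walks every CPU's slot, so it is the expensive path. */
	val = percpu_counter_sum(&ctr);

	percpu_counter_destroy(&ctr);
	return val == 16384 ? 0 : -EINVAL;
}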
 
 }                                                                      \
 BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field)
 
-static ssize_t btrfs_space_info_show_total_bytes_pinned(struct kobject *kobj,
-                                                      struct kobj_attribute *a,
-                                                      char *buf)
-{
-       struct btrfs_space_info *sinfo = to_space_info(kobj);
-       s64 val = percpu_counter_sum(&sinfo->total_bytes_pinned);
-       return scnprintf(buf, PAGE_SIZE, "%lld\n", val);
-}
-
 SPACE_INFO_ATTR(flags);
 SPACE_INFO_ATTR(total_bytes);
 SPACE_INFO_ATTR(bytes_used);
 SPACE_INFO_ATTR(bytes_zone_unusable);
 SPACE_INFO_ATTR(disk_used);
 SPACE_INFO_ATTR(disk_total);
-BTRFS_ATTR(space_info, total_bytes_pinned,
-          btrfs_space_info_show_total_bytes_pinned);
 
 static struct attribute *space_info_attrs[] = {
        BTRFS_ATTR_PTR(space_info, flags),
        BTRFS_ATTR_PTR(space_info, bytes_zone_unusable),
        BTRFS_ATTR_PTR(space_info, disk_used),
        BTRFS_ATTR_PTR(space_info, disk_total),
-       BTRFS_ATTR_PTR(space_info, total_bytes_pinned),
        NULL,
 };
 ATTRIBUTE_GROUPS(space_info);
 static void space_info_release(struct kobject *kobj)
 {
        struct btrfs_space_info *sinfo = to_space_info(kobj);
-       percpu_counter_destroy(&sinfo->total_bytes_pinned);
        kfree(sinfo);
 }
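
The show function removed above used percpu_counter_sum() rather than the cheap read, the right choice for a user-visible number. A minimal sketch of the two read paths and their tradeoff; read_fast() and read_exact() are hypothetical helpers, not btrfs code:

#include <linux/percpu_counter.h>

/* Cheap: returns only the shared count. Per-CPU deltas that have not
 * crossed the batch yet are missing, so the result can be stale by up
 * to roughly batch * nr_cpus. */
static s64 read_fast(struct percpu_counter *ctr)
{
	return percpu_counter_read(ctr);
}

/* Exact: folds in every CPU's local slot under the counter's lock.
 * Sysfs reads are rare and user-initiated, so they can afford this. */
static s64 read_exact(struct percpu_counter *ctr)
{
	return percpu_counter_sum(ctr);
}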