num_dirty = root->fs_info->dirty_metadata_bytes;
 
        if (num_dirty > thresh) {
-               balance_dirty_pages_ratelimited_nr(
-                                  root->fs_info->btree_inode->i_mapping, 1);
+               balance_dirty_pages_ratelimited(
+                                  root->fs_info->btree_inode->i_mapping);
        }
        return;
 }
        num_dirty = root->fs_info->dirty_metadata_bytes;
 
        if (num_dirty > thresh) {
-               balance_dirty_pages_ratelimited_nr(
-                                  root->fs_info->btree_inode->i_mapping, 1);
+               balance_dirty_pages_ratelimited(
+                                  root->fs_info->btree_inode->i_mapping);
        }
        return;
 }
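
These first two hunks make the same change to what appear to be two nearly
identical btrfs helpers (the repeated body suggests btrfs_btree_balance_dirty
and a __ variant): each used to pass a hard-coded page count of 1, and each
now simply asks for a throttle check. A runnable toy model of the resulting
control flow; the stub, the wrapper name, and the 32 MiB threshold are
assumptions, not kernel code:

    #include <stdio.h>

    /* Toy model of the btrfs hunks: throttling is gated on dirty metadata
     * bytes, and the call carries no page count. The stub below stands in
     * for the kernel function (which takes the btree inode's mapping). */
    static unsigned long long dirty_metadata_bytes;

    static void balance_dirty_pages_ratelimited(void)    /* stand-in stub */
    {
            printf("throttle poll at %llu dirty bytes\n", dirty_metadata_bytes);
    }

    static void btree_balance_dirty(void)    /* hypothetical wrapper name */
    {
            const unsigned long long thresh = 32ULL << 20;    /* assumed */

            if (dirty_metadata_bytes > thresh)
                    balance_dirty_pages_ratelimited();
    }

    int main(void)
    {
            dirty_metadata_bytes = 8ULL << 20;     /* below threshold: no poll */
            btree_balance_dirty();
            dirty_metadata_bytes = 48ULL << 20;    /* above threshold: poll */
            btree_balance_dirty();
            return 0;
    }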
 
 
                cond_resched();
 
-               balance_dirty_pages_ratelimited_nr(inode->i_mapping,
-                                                  dirty_pages);
+               balance_dirty_pages_ratelimited(inode->i_mapping);
                if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                        btrfs_btree_balance_dirty(root, 1);
 
 
                }
 
                defrag_count += ret;
-               balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
+               balance_dirty_pages_ratelimited(inode->i_mapping);
                mutex_unlock(&inode->i_mutex);
 
                if (newer_than) {
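
The btrfs buffered-write and defrag hunks above stop reporting how many pages
they just dirtied (dirty_pages, ret). That information is not lost: in kernels
of this era, account_page_dirtied() increments current->nr_dirtied for every
page as it is dirtied, so the throttle point can consult the per-task tally
itself. A runnable userspace model of that division of labor; the names mirror
the kernel's, but the signatures and the poll interval are invented:

    #include <stdio.h>

    static long nr_dirtied;                        /* current->nr_dirtied */
    static const long dirty_poll_interval = 32;    /* assumed value */

    static void account_page_dirtied(void)
    {
            nr_dirtied++;          /* the kernel does this per dirtied page */
    }

    static void balance_dirty_pages_ratelimited(void)
    {
            if (nr_dirtied >= dirty_poll_interval) {
                    printf("throttle after %ld dirtied pages\n", nr_dirtied);
                    nr_dirtied = 0;    /* window restarts after throttling */
            }
    }

    int main(void)
    {
            /* like __btrfs_buffered_write: dirty a batch, then poll once */
            for (int batch = 0; batch < 8; batch++) {
                    for (int page = 0; page < 16; page++)
                            account_page_dirtied();
                    balance_dirty_pages_ratelimited();
            }
            return 0;
    }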
 
                ret = sd.num_spliced;
 
        if (ret > 0) {
-               unsigned long nr_pages;
                int err;
 
-               nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
                err = generic_write_sync(out, *ppos, ret);
                if (err)
                        ret = err;
                else
                        *ppos += ret;
 
-               balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
+               balance_dirty_pages_ratelimited(mapping);
        }
 
        return ret;
 
                ret = sd.num_spliced;
 
        if (ret > 0) {
-               unsigned long nr_pages;
                int err;
 
-               nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
                err = generic_write_sync(out, *ppos, ret);
                if (err)
                        ret = err;
                else
                        *ppos += ret;
-               balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
+               balance_dirty_pages_ratelimited(mapping);
        }
        sb_end_write(inode->i_sb);
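
Both splice hunks (apparently the ocfs2 and generic splice-write paths, given
the sb_end_write() tail on the second) delete the same dead computation: ret
is a byte count that was rounded up to whole pages solely to feed the _nr
call. The rounding idiom is ordinary ceiling division; a quick standalone
check, assuming 4 KiB pages (PAGE_CACHE_SIZE was an alias for PAGE_SIZE in
kernels of this era):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* The deleted expression: round a byte count up to whole pages. */
    static unsigned long bytes_to_pages(unsigned long bytes)
    {
            return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
            assert(bytes_to_pages(1) == 1);              /* partial page counts */
            assert(bytes_to_pages(PAGE_SIZE) == 1);      /* exact page */
            assert(bytes_to_pages(PAGE_SIZE + 1) == 2);  /* spills to a second */
            printf("round-up identity holds\n");
            return 0;
    }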
 
 
                            unsigned long start_time);
 
 void page_writeback_init(void);
-void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
-                                       unsigned long nr_pages_dirtied);
-
-static inline void
-balance_dirty_pages_ratelimited(struct address_space *mapping)
-{
-       balance_dirty_pages_ratelimited_nr(mapping, 1);
-}
+void balance_dirty_pages_ratelimited(struct address_space *mapping);
 
 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
                                void *data);
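
After this hunk the header exports a single entry point: the static-inline
shim that forwarded a count of 1 is gone, and because the surviving function
keeps the shim's exact signature, existing callers compile unchanged. A
minimal mock of the resulting calling convention, following the kerneldoc
contract quoted in the next hunk; the struct layout and stub body are
invented:

    #include <stdio.h>

    struct address_space { const char *name; };    /* reduced to a tag type */

    static void balance_dirty_pages_ratelimited(struct address_space *mapping)
    {
            printf("ratelimit poll on %s\n", mapping->name);
    }

    int main(void)
    {
            struct address_space mapping = { "inode->i_mapping" };

            /* contract: call once for each page which was newly dirtied */
            for (int page = 0; page < 3; page++)
                    balance_dirty_pages_ratelimited(&mapping);
            return 0;
    }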
 
 }
 
 /*
- * After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr()
+ * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
  * will look to see if it needs to start dirty throttling.
  *
  * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
 
 /**
- * balance_dirty_pages_ratelimited_nr - balance dirty memory state
+ * balance_dirty_pages_ratelimited - balance dirty memory state
  * @mapping: address_space which was dirtied
- * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied.  The function will periodically check the system's
  * limit we decrease the ratelimiting by a lot, to prevent individual processes
  * from overshooting the limit by (ratelimit_pages) each.
  */
-void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
-                                       unsigned long nr_pages_dirtied)
+void balance_dirty_pages_ratelimited(struct address_space *mapping)
 {
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        int ratelimit;
         */
        p = &__get_cpu_var(dirty_throttle_leaks);
        if (*p > 0 && current->nr_dirtied < ratelimit) {
+               unsigned long nr_pages_dirtied;
                nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
                *p -= nr_pages_dirtied;
                current->nr_dirtied += nr_pages_dirtied;
        if (unlikely(current->nr_dirtied >= ratelimit))
                balance_dirty_pages(mapping, current->nr_dirtied);
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
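
This last hunk keeps nr_pages_dirtied alive only as a block-local variable:
per the surrounding code, the per-CPU dirty_throttle_leaks counter holds page
dirties left behind by exited tasks, and they are charged to the running task
without pushing it past its ratelimit. A standalone model of that transfer;
all of the concrete values are invented:

    #include <stdio.h>

    static long min_long(long a, long b) { return a < b ? a : b; }

    int main(void)
    {
            long dirty_throttle_leaks = 40;    /* per-CPU leaked page dirties */
            long nr_dirtied = 10;              /* current task's tally */
            long ratelimit = 32;               /* this task's poll interval */

            /* drain leaked dirties into the task, capped at the ratelimit */
            if (dirty_throttle_leaks > 0 && nr_dirtied < ratelimit) {
                    long nr_pages_dirtied = min_long(dirty_throttle_leaks,
                                                     ratelimit - nr_dirtied);
                    dirty_throttle_leaks -= nr_pages_dirtied;
                    nr_dirtied += nr_pages_dirtied;
            }

            /* the task now sits exactly at the ratelimit: 10 + 22 = 32 */
            printf("leaks=%ld nr_dirtied=%ld\n",
                   dirty_throttle_leaks, nr_dirtied);
            return 0;
    }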
 
 void throttle_vm_writeout(gfp_t gfp_mask)
 {