- * to allocate @blocks
- * Worse case is one block per extent
+ * to allocate a new block at @lblock
+ * Worst case is one new block per level of the extent tree
  */
-int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
+int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
 {
-       int lcap, icap, rcap, leafs, idxs, num;
-       int newextents = blocks;
-
-       rcap = ext4_ext_space_root_idx(inode, 0);
-       lcap = ext4_ext_space_block(inode, 0);
-       icap = ext4_ext_space_block_idx(inode, 0);
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       int idxs, num = 0;
 
-       /* number of new leaf blocks needed */
-       num = leafs = (newextents + lcap - 1) / lcap;
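+       /* number of index entries per tree block */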
+       idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
+               / sizeof(struct ext4_extent_idx));
 
        /*
-        * Worse case, we need separate index block(s)
-        * to link all new leaf blocks
+        * If the new delayed allocation block is contiguous with the
+        * previous da block, it can share index blocks with the
+        * previous block, so we only need to allocate a new index
+        * block every idxs leaf blocks.  At idxs**2 blocks, we need
+        * an additional index block, and at idxs**3 blocks, yet
+        * another index block.
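+        *
+        * For example, with 4k blocks and the 12-byte on-disk
+        * ext4_extent_header and ext4_extent_idx structures, idxs is
+        * (4096 - 12) / 12 = 340: a contiguous delayed allocation
+        * charges one new index block every 340 blocks, a second one
+        * every 340^2 blocks, and a third every 340^3 blocks.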
         */
-       idxs = (leafs + icap - 1) / icap;
-       do {
-               num += idxs;
-               idxs = (idxs + icap - 1) / icap;
-       } while (idxs > rcap);
+       if (ei->i_da_metadata_calc_len &&
+           ei->i_da_metadata_calc_last_lblock+1 == lblock) {
+               if ((ei->i_da_metadata_calc_len % idxs) == 0)
+                       num++;
+               if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
+                       num++;
+               if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
+                       num++;
+                       ei->i_da_metadata_calc_len = 0;
+               } else {
+                       ei->i_da_metadata_calc_len++;
+               }
+               ei->i_da_metadata_calc_last_lblock++;
+               return num;
+       }
 
-       return num;
+       /*
+        * In the worst case we need a new set of index blocks at
+        * every level of the inode's extent tree.
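+        * (For a tree of depth 2, say, that is a new block at each
+        * of the two levels below the root, plus one more should the
+        * tree need to grow in depth: 2 + 1 = 3.)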
+        */
+       ei->i_da_metadata_calc_len = 1;
+       ei->i_da_metadata_calc_last_lblock = lblock;
+       return ext_depth(inode) + 1;
 }
 
 static int
 
        return &EXT4_I(inode)->i_reserved_quota;
 }
 #endif
+
 /*
- * Calculate the number of metadata blocks need to reserve
- * to allocate @blocks for non extent file based file
+ * Calculate the number of metadata blocks we need to reserve
+ * to allocate a new block at @lblock for a non-extent file
  */
-static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
+static int ext4_indirect_calc_metadata_amount(struct inode *inode,
+                                             sector_t lblock)
 {
-       int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-       int ind_blks, dind_blks, tind_blks;
-
-       /* number of new indirect blocks needed */
-       ind_blks = (blocks + icap - 1) / icap;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
+       int blk_bits;
 
-       dind_blks = (ind_blks + icap - 1) / icap;
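+       /* blocks mapped directly from the inode need no metadata */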
+       if (lblock < EXT4_NDIR_BLOCKS)
+               return 0;
 
-       tind_blks = 1;
+       lblock -= EXT4_NDIR_BLOCKS;
 
-       return ind_blks + dind_blks + tind_blks;
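+       /*
+        * If this block lives in the same indirect block as the last
+        * one we reserved for, its metadata is already accounted for.
+        */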
+       if (ei->i_da_metadata_calc_len &&
+           (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
+               ei->i_da_metadata_calc_len++;
+               return 0;
+       }
+       ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
+       ei->i_da_metadata_calc_len = 1;
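+       /*
+        * Worst case: one new metadata block per tree level above the
+        * data block.  E.g. with 4k blocks (1024 block numbers per
+        * indirect block), lblock 1024 past the direct blocks needs
+        * both an indirect and a doubly-indirect block: 10/10 + 1 = 2.
+        */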
+       blk_bits = order_base_2(lblock);
+       return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
 }
 
 /*
- * Calculate the number of metadata blocks need to reserve
- * to allocate given number of blocks
+ * Calculate the number of metadata blocks we need to reserve
+ * to allocate a block located at @lblock
  */
-static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
+static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
 {
-       if (!blocks)
-               return 0;
-
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
-               return ext4_ext_calc_metadata_amount(inode, blocks);
+               return ext4_ext_calc_metadata_amount(inode, lblock);
 
-       return ext4_indirect_calc_metadata_amount(inode, blocks);
+       return ext4_indirect_calc_metadata_amount(inode, lblock);
 }
 
 /*
                 */
                mdb_free = ei->i_reserved_meta_blocks;
                ei->i_reserved_meta_blocks = 0;
+               ei->i_da_metadata_calc_len = 0;
                percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
        }
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
        return ret ? ret : copied;
 }
 
-static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
+/*
+ * Reserve a single block located at @lblock
+ */
+static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
 {
        int retries = 0;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
-       unsigned long md_needed, md_reserved, total = 0;
+       unsigned long md_needed, md_reserved;
 
        /*
         * recalculate the amount of metadata blocks to reserve
 repeat:
        spin_lock(&ei->i_block_reservation_lock);
        md_reserved = ei->i_reserved_meta_blocks;
-       md_needed = ext4_calc_metadata_amount(inode, nrblocks);
-       total = md_needed + nrblocks;
+       md_needed = ext4_calc_metadata_amount(inode, lblock);
        spin_unlock(&ei->i_block_reservation_lock);
 
        /*
         * later. Real quota accounting is done at pages writeout
         * time.
         */
-       if (vfs_dq_reserve_block(inode, total)) {
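+       /* reserve quota for one data block plus its worst-case metadata */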
+       if (vfs_dq_reserve_block(inode, md_needed + 1)) {
                /* 
                 * We tend to badly over-estimate the amount of
                 * metadata blocks which are needed, so if we have
                return -EDQUOT;
        }
 
-       if (ext4_claim_free_blocks(sbi, total)) {
-               vfs_dq_release_reservation_block(inode, total);
+       if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
+               vfs_dq_release_reservation_block(inode, md_needed + 1);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                retry:
                        if (md_reserved)
                return -ENOSPC;
        }
        spin_lock(&ei->i_block_reservation_lock);
-       ei->i_reserved_data_blocks += nrblocks;
+       ei->i_reserved_data_blocks++;
        ei->i_reserved_meta_blocks += md_needed;
        spin_unlock(&ei->i_block_reservation_lock);
 
                 */
                to_free += ei->i_reserved_meta_blocks;
                ei->i_reserved_meta_blocks = 0;
+               ei->i_da_metadata_calc_len = 0;
        }
 
        /* update fs dirty blocks counter */
                 * XXX: __block_prepare_write() unmaps passed block,
                 * is it OK?
                 */
-               ret = ext4_da_reserve_space(inode, 1);
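+               /* reserve this block and any new metadata it implies */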
+               ret = ext4_da_reserve_space(inode, iblock);
                if (ret)
                        /* not enough space to reserve */
                        return ret;