}
 
 /*
- * Reserve a single cluster located at lblock
+ * Reserve space for a single cluster
  */
-static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+static int ext4_da_reserve_space(struct inode *inode)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
-       unsigned int md_needed;
        int ret;
 
        /*
        if (ret)
                return ret;
 
-       /*
-        * recalculate the amount of metadata blocks to reserve
-        * in order to allocate nrblocks
-        * worse case is one extent per block
-        */
        spin_lock(&ei->i_block_reservation_lock);
-       /*
-        * ext4_calc_metadata_amount() has side effects, which we have
-        * to be prepared undo if we fail to claim space.
-        */
-       md_needed = 0;
-       trace_ext4_da_reserve_space(inode, 0);
-
        if (ext4_claim_free_clusters(sbi, 1, 0)) {
                spin_unlock(&ei->i_block_reservation_lock);
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
                return -ENOSPC;
        }
        ei->i_reserved_data_blocks++;
+       trace_ext4_da_reserve_space(inode);
        spin_unlock(&ei->i_block_reservation_lock);
 
        return 0;       /* success */
                 * then we don't need to reserve it again. However we still need
                 * to reserve metadata for every block we're going to write.
                 */
-               if (EXT4_SB(inode->i_sb)->s_cluster_ratio <= 1 ||
+               if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
                    !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
-                       ret = ext4_da_reserve_space(inode, iblock);
+                       ret = ext4_da_reserve_space(inode);
                        if (ret) {
                                /* not enough space to reserve */
                                retval = ret;
 
 );
 
 TRACE_EVENT(ext4_da_reserve_space,
-       TP_PROTO(struct inode *inode, int md_needed),
+       TP_PROTO(struct inode *inode),
 
-       TP_ARGS(inode, md_needed),
+       TP_ARGS(inode),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(        ino_t,  ino                     )
                __field(        __u64,  i_blocks                )
-               __field(        int,    md_needed               )
                __field(        int,    reserved_data_blocks    )
                __field(        int,    reserved_meta_blocks    )
                __field(        __u16,  mode                    )
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
                __entry->i_blocks = inode->i_blocks;
-               __entry->md_needed = md_needed;
                __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
                __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
                __entry->mode   = inode->i_mode;
        ),
 
-       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d "
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu "
                  "reserved_data_blocks %d reserved_meta_blocks %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->mode, __entry->i_blocks,
-                 __entry->md_needed, __entry->reserved_data_blocks,
+                 __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks)
 );
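
For reference, with the inode.c hunks applied the reservation helper reduces to the sketch below. Everything here is taken from the context and '+' lines above, except the dquot_reserve_block() call and its comment: that part of the function is not visible in the hunk and is assumed from the matching dquot_release_reservation_block() in the error path.

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
        int ret;

        /*
         * Reserve quota for one cluster up front (assumed; the hunk above
         * only shows the matching release in the error path below).
         */
        ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
        if (ret)
                return ret;

        spin_lock(&ei->i_block_reservation_lock);
        if (ext4_claim_free_clusters(sbi, 1, 0)) {
                /* no free clusters: undo the quota reservation and bail */
                spin_unlock(&ei->i_block_reservation_lock);
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
                return -ENOSPC;
        }
        ei->i_reserved_data_blocks++;
        trace_ext4_da_reserve_space(inode);
        spin_unlock(&ei->i_block_reservation_lock);

        return 0;       /* success */
}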