                        goto out_commit;
                did_quota = 1;
 
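+               /* Point this allocation at the inode's local alloc reservation */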
+               data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
+
                ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
                                           &num);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
 
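+               /* If we'll be allocating data clusters, use the inode's reservation */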
+               if (data_ac)
+                       data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
+
                credits = ocfs2_calc_extend_credits(inode->i_sb,
                                                    &di->id2.i_list,
                                                    clusters_to_alloc);
 
 
        down_write(&OCFS2_I(inode)->ip_alloc_sem);
 
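+       /* Truncating; give back any local alloc reservation this inode holds */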
+       ocfs2_resv_discard(&osb->osb_la_resmap,
+                          &OCFS2_I(inode)->ip_la_data_resv);
+
        /*
         * The inode lock forced other nodes to sync and drop their
         * pages, which (correctly) happens even if we have a truncate
 
        ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres);
        ocfs2_mark_lockres_freeing(&oi->ip_open_lockres);
 
+       ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap,
+                          &oi->ip_la_data_resv);
+       ocfs2_resv_init_once(&oi->ip_la_data_resv);
+
        /* We very well may get a clear_inode before all an inodes
         * metadata has hit disk. Of course, we can't drop any cluster
         * locks until the journal has finished with it. The only
 
        /* Only valid if the inode is the dir. */
        u32                             ip_last_used_slot;
        u64                             ip_last_used_group;
+
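+       /* Local alloc reservation for data allocations on this inode. */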
+       struct ocfs2_alloc_reservation  ip_la_data_resv;
 };
 
 /*
 
        oi->ip_blkno = 0ULL;
        oi->ip_clusters = 0;
 
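+       /* Set up the embedded local alloc reservation once per slab object */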
+       ocfs2_resv_init_once(&oi->ip_la_data_resv);
+
        ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
        ocfs2_lock_res_init_once(&oi->ip_inode_lockres);
        ocfs2_lock_res_init_once(&oi->ip_open_lockres);