/* fsync.c */
 extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
-extern int ext4_flush_completed_IO(struct inode *);
+extern int ext4_flush_unwritten_io(struct inode *);
 
 /* hash.c */
 extern int ext4fs_dirhash(const char *name, int len, struct
 extern const struct inode_operations ext4_file_inode_operations;
 extern const struct file_operations ext4_file_operations;
 extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
+extern void ext4_unwritten_wait(struct inode *inode);
 
 /* namei.c */
 extern const struct inode_operations ext4_dir_inode_operations;
 
         * finish any pending end_io work so we won't run the risk of
         * converting any truncated blocks to initialized later
         */
-       ext4_flush_completed_IO(inode);
+       ext4_flush_unwritten_io(inode);
 
        /*
         * probably first extent we're gonna free will be last in block
 
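For illustration only, a minimal sketch of the ordering the hunk above relies on; the function name below is a placeholder, and only ext4_flush_unwritten_io() comes from the patch. Draining pending end_io work first means a late unwritten-to-initialized conversion cannot hit blocks that truncate is about to free.

/* Sketch (assumed name): truncate-side ordering around the renamed helper. */
static void truncate_ordering_sketch(struct inode *inode)
{
	/* caller holds i_mutex, or the inode is I_FREEING (see the
	 * WARN_ON_ONCE contract in ext4_flush_unwritten_io below) */
	ext4_flush_unwritten_io(inode);	/* drain end_io work, then wait */
	/* ... only now free or convert the extent range ... */
}
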
        /* Wait all existing dio workers, newcomers will block on i_mutex */
        ext4_inode_block_unlocked_dio(inode);
-       inode_dio_wait(inode);
-       err = ext4_flush_completed_IO(inode);
+       err = ext4_flush_unwritten_io(inode);
        if (err)
                goto out_dio;
+       inode_dio_wait(inode);
 
        credits = ext4_writepage_trans_blocks(inode);
        handle = ext4_journal_start(inode, credits);
 
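Condensed sketch of the reordering in the hunk above, using only calls that appear in the patch (the surrounding punch-hole logic is elided): the flush now runs before inode_dio_wait(), apparently so a flush error bails out before draining in-flight DIO, while new submitters remain blocked on i_mutex.

	ext4_inode_block_unlocked_dio(inode);	/* stop nolock DIO readers     */
	err = ext4_flush_unwritten_io(inode);	/* flush + wait; may fail      */
	if (err)
		goto out_dio;
	inode_dio_wait(inode);			/* drain DIO already in flight */
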
        return 0;
 }
 
-static void ext4_unwritten_wait(struct inode *inode)
+void ext4_unwritten_wait(struct inode *inode)
 {
        wait_queue_head_t *wq = ext4_ioend_wq(inode);
 
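The hunk above only drops the static qualifier; the rest of the body falls outside the shown context. Judging from the i_unwritten counter used elsewhere in this patch, it presumably sleeps on the ioend waitqueue until that counter reaches zero, roughly:

	/* assumed continuation, not shown by the hunk */
	wait_event(*wq, atomic_read(&EXT4_I(inode)->i_unwritten) == 0);
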
 
        if (inode->i_sb->s_flags & MS_RDONLY)
                goto out;
 
-       ret = ext4_flush_completed_IO(inode);
+       ret = ext4_flush_unwritten_io(inode);
        if (ret < 0)
                goto out;
 
 retry:
        if (rw == READ && ext4_should_dioread_nolock(inode)) {
-               if (unlikely(!list_empty(&ei->i_completed_io_list)))
-                       ext4_flush_completed_IO(inode);
-
+               if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) {
+                       mutex_lock(&inode->i_mutex);
+                       ext4_flush_unwritten_io(inode);
+                       mutex_unlock(&inode->i_mutex);
+               }
                /*
                 * Nolock dioread optimization may be dynamically disabled
                 * via ext4_inode_block_unlocked_dio(). Check inode's state
 
 
                list_add_tail(&io->list, &complete);
        }
-       /* It is important to update all flags for all end_io in one shot w/o
-        * dropping the lock.*/
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        while (!list_empty(&complete)) {
                io = list_entry(complete.next, ext4_io_end_t, list);
        ext4_do_flush_completed_IO(io->inode, io);
 }
 
-int ext4_flush_completed_IO(struct inode *inode)
+int ext4_flush_unwritten_io(struct inode *inode)
 {
-       return ext4_do_flush_completed_IO(inode, NULL);
+       int ret;
+       WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
+                    !(inode->i_state & I_FREEING));
+       ret = ext4_do_flush_completed_IO(inode, NULL);
+       ext4_unwritten_wait(inode);
+       return ret;
 }
 
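A minimal usage sketch for the combined helper, assuming nothing beyond the hunks above: callers must either hold i_mutex or be tearing the inode down (I_FREEING), per the WARN_ON_ONCE; the lock/unlock pattern mirrors the direct-IO read hunk earlier in the patch.

/* Sketch (assumed name): caller that does not already hold i_mutex. */
static int flush_unwritten_sketch(struct inode *inode)
{
	int err;

	mutex_lock(&inode->i_mutex);		/* satisfy the locking contract */
	err = ext4_flush_unwritten_io(inode);	/* flush end_io work, then wait */
	mutex_unlock(&inode->i_mutex);
	return err;
}
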
 ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)