EXT4_STATE_DIO_UNWRITTEN,       /* need convert on dio done */
        EXT4_STATE_NEWENTRY,            /* File just added to dir */
        EXT4_STATE_DELALLOC_RESERVED,   /* blks already reserved for delalloc */
+       EXT4_STATE_DIOREAD_LOCK,        /* Disable unlocked dio reads;
+                                          force the locked path */
 };
 
 #define EXT4_INODE_BIT_FNS(name, field, offset)                                \
        set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
 }
 
+/*
+ * Disable the nolock dio read optimization, so that new dio readers
+ * are forced to grab i_mutex.
+ */
+static inline void ext4_inode_block_unlocked_dio(struct inode *inode)
+{
+       ext4_set_inode_state(inode, EXT4_STATE_DIOREAD_LOCK);
+       smp_mb();       /* store flag before inode_dio_wait() reads i_dio_count */
+}
+
+static inline void ext4_inode_resume_unlocked_dio(struct inode *inode)
+{
+       smp_mb();       /* finish blocked-section updates before clearing flag */
+       ext4_clear_inode_state(inode, EXT4_STATE_DIOREAD_LOCK);
+}
+
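
Taken together, the helper pair above and the reader-side check in the dio path below form a store-buffering style handshake: either a reader sees EXT4_STATE_DIOREAD_LOCK and falls back to the locked path, or the blocking side sees the reader's i_dio_count reference in inode_dio_wait(). A minimal userspace model of the handshake, assuming C11 atomics (all names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int dio_count;            /* models inode->i_dio_count */
static atomic_bool dioread_lock;        /* models EXT4_STATE_DIOREAD_LOCK */

static void block_unlocked_dio(void)    /* ext4_inode_block_unlocked_dio() */
{
        atomic_store(&dioread_lock, true);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
}

static void wait_for_dio(void)          /* inode_dio_wait(); kernel sleeps, we spin */
{
        while (atomic_load(&dio_count))
                ;
}

static void resume_unlocked_dio(void)   /* ext4_inode_resume_unlocked_dio() */
{
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        atomic_store(&dioread_lock, false);
}

static bool enter_unlocked_dio(void)    /* reader side of the dio path */
{
        atomic_fetch_add(&dio_count, 1);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (atomic_load(&dioread_lock)) {
                atomic_fetch_sub(&dio_count, 1);        /* inode_dio_done() */
                return false;   /* caller must take the locked path */
        }
        return true;    /* caller does unlocked dio, then drops dio_count */
}

With both full fences in place, the outcome where the reader misses the flag and the blocker simultaneously misses the reader's count is forbidden, which is exactly the property the smp_mb() pair provides in the patch.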
 #define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
 
 /* For ioend & aio unwritten conversion wait queues */
 
                if (unlikely(!list_empty(&ei->i_completed_io_list)))
                        ext4_flush_completed_IO(inode);
 
+               /*
+                * The nolock dioread optimization may be dynamically
+                * disabled via ext4_inode_block_unlocked_dio(). Check the
+                * inode's state while holding an extra i_dio_count ref.
+                */
+               atomic_inc(&inode->i_dio_count);
+               smp_mb();
+               if (unlikely(ext4_test_inode_state(inode,
+                                                   EXT4_STATE_DIOREAD_LOCK))) {
+                       inode_dio_done(inode);
+                       goto locked;
+               }
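+               /*
+                * Flag was clear: the smp_mb() above pairs with the one
+                * in ext4_inode_block_unlocked_dio(), so a blocker we did
+                * not see here must see our i_dio_count reference and
+                * wait in inode_dio_wait().
+                */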
                ret = __blockdev_direct_IO(rw, iocb, inode,
                                 inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
                                 ext4_get_block, NULL, NULL, 0);
+               inode_dio_done(inode);
        } else {
+locked:
                ret = blockdev_direct_IO(rw, iocb, inode, iov,
                                 offset, nr_segs, ext4_get_block);
 
 
                        return err;
        }
 
+       /* Block new unlocked dio readers and wait for in-flight dio workers */
+       ext4_inode_block_unlocked_dio(inode);
+       inode_dio_wait(inode);
+
        jbd2_journal_lock_updates(journal);
 
        /*
        ext4_set_aops(inode);
 
        jbd2_journal_unlock_updates(journal);
+       ext4_inode_resume_unlocked_dio(inode);
 
        /* Finally we can mark the inode as dirty. */
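
The same block/drain/resume bracket recurs in move_extent below. As a sketch only (this wrapper is hypothetical, not part of the patch), the writer-side pattern the patch repeats is:

/* Hypothetical helper, not in the patch: brackets an operation that
 * must not race with unlocked dio readers. */
static void ext4_with_unlocked_dio_blocked(struct inode *inode,
                                           void (*op)(struct inode *))
{
        ext4_inode_block_unlocked_dio(inode);   /* new readers take the locked path */
        inode_dio_wait(inode);                  /* drain readers already in flight */
        op(inode);                              /* safe from unlocked dio readers */
        ext4_inode_resume_unlocked_dio(inode);
}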
 
 
        /* Protect orig and donor inodes against a truncate */
        mext_inode_double_lock(orig_inode, donor_inode);
 
+       /* Block unlocked dio and drain in-flight readers on both inodes */
+       ext4_inode_block_unlocked_dio(orig_inode);
+       ext4_inode_block_unlocked_dio(donor_inode);
+       inode_dio_wait(orig_inode);
+       inode_dio_wait(donor_inode);
+
        /* Protect extent tree against block allocations via delalloc */
        double_down_write_data_sem(orig_inode, donor_inode);
        /* Check the filesystem environment whether move_extent can be done */
                kfree(holecheck_path);
        }
        double_up_write_data_sem(orig_inode, donor_inode);
+       ext4_inode_resume_unlocked_dio(orig_inode);
+       ext4_inode_resume_unlocked_dio(donor_inode);
        mext_inode_double_unlock(orig_inode, donor_inode);
 
        return ret;