{
        struct inode *inode = file_inode(iocb->ki_filp);
        int level;
-       wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
 
        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
        if (ocfs2_iocb_is_unaligned_aio(iocb)) {
                ocfs2_iocb_clear_unaligned_aio(iocb);
 
-               if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) &&
-                   waitqueue_active(wq)) {
-                       wake_up_all(wq);
-               }
+               mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
        }
 
        ocfs2_iocb_clear_rw_locked(iocb);
 
 #define ocfs2_iocb_is_unaligned_aio(iocb) \
        test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
 
-#define OCFS2_IOEND_WQ_HASH_SZ 37
-#define ocfs2_ioend_wq(v)   (&ocfs2__ioend_wq[((unsigned long)(v)) %\
-                                           OCFS2_IOEND_WQ_HASH_SZ])
-extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
-
 #endif /* OCFS2_FILE_H */
 
        return ret;
 }
 
-static void ocfs2_aiodio_wait(struct inode *inode)
-{
-       wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
-
-       wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0));
-}
-
 static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
 {
        int blockmask = inode->i_sb->s_blocksize - 1;
                 * Wait on previous unaligned aio to complete before
                 * proceeding.
                 */
-               ocfs2_aiodio_wait(inode);
-
-               /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */
-               atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio);
+               mutex_lock(&OCFS2_I(inode)->ip_unaligned_aio);
+               /* Mark the iocb as needing an unlock in ocfs2_dio_end_io */
                ocfs2_iocb_set_unaligned_aio(iocb);
        }
 
 
        if (unaligned_dio) {
                ocfs2_iocb_clear_unaligned_aio(iocb);
-               atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+               mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
        }
 
 out:
 
        struct rw_semaphore             ip_xattr_sem;
 
-       /* Number of outstanding AIO's which are not page aligned */
-       atomic_t                        ip_unaligned_aio;
+       /* Serializes unaligned AIO against this inode (was a count of
+        * outstanding unaligned AIOs) */
+       struct mutex                    ip_unaligned_aio;
 
        /* These fields are protected by ip_lock */
        spinlock_t                      ip_lock;
 
        return 0;
 }
 
-wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
-
 static int __init ocfs2_init(void)
 {
-       int status, i;
-
-       for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++)
-               init_waitqueue_head(&ocfs2__ioend_wq[i]);
+       int status;
 
        status = init_ocfs2_uptodate_cache();
        if (status < 0)
        ocfs2_extent_map_init(&oi->vfs_inode);
        INIT_LIST_HEAD(&oi->ip_io_markers);
        oi->ip_dir_start_lookup = 0;
-       atomic_set(&oi->ip_unaligned_aio, 0);
+       mutex_init(&oi->ip_unaligned_aio);
        init_rwsem(&oi->ip_alloc_sem);
        init_rwsem(&oi->ip_xattr_sem);
        mutex_init(&oi->ip_io_mutex);