return ret;
 }
 
-static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-                                               pmd_t *pmd, unsigned int flags)
-{
-       struct inode *inode = file_inode(vma->vm_file);
-       struct ext2_inode_info *ei = EXT2_I(inode);
-       int ret;
-
-       if (flags & FAULT_FLAG_WRITE) {
-               sb_start_pagefault(inode->i_sb);
-               file_update_time(vma->vm_file);
-       }
-       down_read(&ei->dax_sem);
-
-       ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
-
-       up_read(&ei->dax_sem);
-       if (flags & FAULT_FLAG_WRITE)
-               sb_end_pagefault(inode->i_sb);
-       return ret;
-}
-
 static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
                struct vm_fault *vmf)
 {
 
 static const struct vm_operations_struct ext2_dax_vm_ops = {
        .fault          = ext2_dax_fault,
-       .pmd_fault      = ext2_dax_pmd_fault,
+       /*
+        * .pmd_fault is not supported for DAX because allocation in ext2
+        * cannot be reliably aligned to huge page sizes and so pmd faults
+        * will always fail and fall back to regular faults.
+        */
        .page_mkwrite   = ext2_dax_fault,
        .pfn_mkwrite    = ext2_dax_pfn_mkwrite,
 };
 
        file_accessed(file);
        vma->vm_ops = &ext2_dax_vm_ops;
-       vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+       vma->vm_flags |= VM_MIXEDMAP;
        return 0;
 }
 #else