 extern int khugepaged_init(void);
 extern void khugepaged_destroy(void);
 extern int start_stop_khugepaged(void);
-extern int __khugepaged_enter(struct mm_struct *mm);
+extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                                     unsigned long vm_flags);
+extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+                                      unsigned long vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
 #ifdef CONFIG_SHMEM
 extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
 #define khugepaged_defrag()                                    \
        (transparent_hugepage_flags &                           \
         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
 
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
        if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
-               return __khugepaged_enter(mm);
-       return 0;
+               __khugepaged_enter(mm);
 }
 
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
        if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
                __khugepaged_exit(mm);
 }
 
-static inline int khugepaged_enter(struct vm_area_struct *vma,
+static inline void khugepaged_enter(struct vm_area_struct *vma,
                                   unsigned long vm_flags)
 {
        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
                if ((khugepaged_always() ||
                     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
                    !(vm_flags & VM_NOHUGEPAGE) &&
                    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-                       if (__khugepaged_enter(vma->vm_mm))
-                               return -ENOMEM;
-       return 0;
+                       __khugepaged_enter(vma->vm_mm);
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       return 0;
 }
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
 }
-static inline int khugepaged_enter(struct vm_area_struct *vma,
-                                  unsigned long vm_flags)
+static inline void khugepaged_enter(struct vm_area_struct *vma,
+                                   unsigned long vm_flags)
 {
-       return 0;
 }
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                                            unsigned long vm_flags)
+static inline void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+                                             unsigned long vm_flags)
 {
-       return 0;
 }
 static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
                                            unsigned long addr)
 {
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

                 * register it here without waiting a page fault that
                 * may not happen any time soon.
                 */
-               if (khugepaged_enter_vma_merge(vma, *vm_flags))
-                       return -ENOMEM;
+               khugepaged_enter_vma_merge(vma, *vm_flags);
                break;
        case MADV_NOHUGEPAGE:
                *vm_flags &= ~VM_HUGEPAGE;
        return true;
 }
 
-int __khugepaged_enter(struct mm_struct *mm)
+void __khugepaged_enter(struct mm_struct *mm)
 {
        struct mm_slot *mm_slot;
        int wakeup;
 
        mm_slot = alloc_mm_slot();
        if (!mm_slot)
-               return -ENOMEM;
+               return;
 
        /* __khugepaged_exit() must not run from under us */
        VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
                free_mm_slot(mm_slot);
-               return 0;
+               return;
        }
 
        spin_lock(&khugepaged_mm_lock);
        insert_to_mm_slots_hash(mm, mm_slot);
        /*
         * Insert just behind the scanning cursor, to let the area settle
         * down a little.
         */
        wakeup = list_empty(&khugepaged_scan.mm_head);
        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
        spin_unlock(&khugepaged_mm_lock);

        mmgrab(mm);
        if (wakeup)
                wake_up_interruptible(&khugepaged_wait);
-
-       return 0;
 }
 
-int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                               unsigned long vm_flags)
 {
        unsigned long hstart, hend;

        /*
         * file-private shmem THP is not supported.
         */
        if (!hugepage_vma_check(vma, vm_flags))
-               return 0;
+               return;
 
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)
-               return khugepaged_enter(vma, vm_flags);
-       return 0;
+               khugepaged_enter(vma, vm_flags);
 }
 
 void __khugepaged_exit(struct mm_struct *mm)
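
Taken together, these conversions make khugepaged registration best effort: if alloc_mm_slot() fails, the mm is simply not queued now and can still be registered later (for example at page-fault time), so callers no longer have an -ENOMEM to propagate. A minimal caller-side sketch of the simplification this enables, assuming a hypothetical fork-path helper (the function name below is invented for illustration and is not part of this patch):

        /* Illustrative sketch, not from this patch; assumes <linux/khugepaged.h>. */
        static void copy_mm_thp_sketch(struct mm_struct *mm, struct mm_struct *oldmm)
        {
                /*
                 * Before: retval = khugepaged_fork(mm, oldmm);
                 *         if (retval) goto fail;
                 * Now khugepaged_fork() returns void, so registration can no
                 * longer fail the fork path.
                 */
                khugepaged_fork(mm, oldmm);
        }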