int shmem_unuse(unsigned int type);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force,
-                                     struct mm_struct *mm, unsigned long vm_flags);
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
                                struct vm_area_struct *vma, pgoff_t index,
-                               bool global_huge);
+                               bool shmem_huge_force);
 #else
-static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                                                     bool shmem_huge_force, struct mm_struct *mm,
-                                                     unsigned long vm_flags)
-{
-       return false;
-}
 static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
                                struct vm_area_struct *vma, pgoff_t index,
-                               bool global_huge)
+                               bool shmem_huge_force)
 {
        return 0;
 }
 
         * Must be done before hugepage flags check since shmem has its
         * own flags.
         */
-       if (!in_pf && shmem_file(vma->vm_file)) {
-               bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file),
-                                                            vma->vm_pgoff, !enforce_sysfs,
-                                                            vma->vm_mm, vm_flags);
-
-               if (!vma_is_anon_shmem(vma))
-                       return global_huge ? orders : 0;
+       if (!in_pf && shmem_file(vma->vm_file))
                return shmem_allowable_huge_orders(file_inode(vma->vm_file),
-                                                       vma, vma->vm_pgoff, global_huge);
-       }
+                                                  vma, vma->vm_pgoff,
+                                                  !enforce_sysfs);
 
        if (!vma_is_anonymous(vma)) {
                /*
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
 static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                                       bool shmem_huge_force, struct mm_struct *mm,
+                                       bool shmem_huge_force, struct vm_area_struct *vma,
                                        unsigned long vm_flags)
 {
+       struct mm_struct *mm = vma ? vma->vm_mm : NULL;
        loff_t i_size;
 
        if (!S_ISREG(inode->i_mode))
        }
 }
 
-bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                  bool shmem_huge_force, struct mm_struct *mm,
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+                  bool shmem_huge_force, struct vm_area_struct *vma,
                   unsigned long vm_flags)
 {
        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
                return false;
 
        return __shmem_huge_global_enabled(inode, index, shmem_huge_force,
-                                          mm, vm_flags);
+                                          vma, vm_flags);
 }
 
 #if defined(CONFIG_SYSFS)
 {
        return 0;
 }
+
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+               bool shmem_huge_force, struct vm_area_struct *vma,
+               unsigned long vm_flags)
+{
+       return false;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
                                struct vm_area_struct *vma, pgoff_t index,
-                               bool global_huge)
+                               bool shmem_huge_force)
 {
        unsigned long mask = READ_ONCE(huge_shmem_orders_always);
        unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
-       unsigned long vm_flags = vma->vm_flags;
+       unsigned long vm_flags = vma ? vma->vm_flags : 0;
+       bool global_huge;
        loff_t i_size;
        int order;
 
-       if ((vm_flags & VM_NOHUGEPAGE) ||
-           test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+       if (vma && ((vm_flags & VM_NOHUGEPAGE) ||
+           test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
                return 0;
 
        /* If the hardware/firmware marked hugepage support disabled. */
        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
                return 0;
 
+       global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force,
+                                               vma, vm_flags);
+       if (!vma || !vma_is_anon_shmem(vma)) {
+               /*
+                * For tmpfs, we now only support PMD-sized THP if huge page
+                * is enabled, otherwise fall back to order 0.
+                */
+               return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+       }
+
        /*
         * Following the 'deny' semantics of the top level, force the huge
         * option off from all mounts.
        struct mm_struct *fault_mm;
        struct folio *folio;
        int error;
-       bool alloced, huge;
+       bool alloced;
        unsigned long orders = 0;
 
        if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
                return 0;
        }
 
-       huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
-                            vma ? vma->vm_flags : 0);
-       /* Find hugepage orders that are allowed for anonymous shmem. */
-       if (vma && vma_is_anon_shmem(vma))
-               orders = shmem_allowable_huge_orders(inode, vma, index, huge);
-       else if (huge)
-               orders = BIT(HPAGE_PMD_ORDER);
-
+       /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
+       orders = shmem_allowable_huge_orders(inode, vma, index, false);
        if (orders > 0) {
                gfp_t huge_gfp;