cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
 
-khugepaged will be automatically started when one or more hugepage
-sizes are enabled (either by directly setting "always" or "madvise",
-or by setting "inherit" while the top-level enabled is set to "always"
-or "madvise"), and it'll be automatically shutdown when the last
-hugepage size is disabled (either by directly setting "never", or by
-setting "inherit" while the top-level enabled is set to "never").
+khugepaged will be automatically started when PMD-sized THP is enabled
+(either the per-size anon control or the top-level control is set to
+"always" or "madvise"), and it'll be automatically shut down when
+PMD-sized THP is disabled (both the per-size anon control and the
+top-level control are set to "never").
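For example, with the relevant control set to "madvise", an application must
opt each mapping in before khugepaged will consider collapsing it. A minimal
userspace sketch (illustrative only; the mapping size and touch pattern are
arbitrary) might look like::

	#include <sys/mman.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t len = 8UL << 20;	/* 8 MiB of anonymous memory */
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return 1;

		/* Opt this range in to THP; khugepaged may later collapse it. */
		if (madvise(buf, len, MADV_HUGEPAGE))
			return 1;

		/* Touch the pages so khugepaged has something to collapse. */
		for (size_t i = 0; i < len; i += 4096)
			((char *)buf)[i] = 1;

		munmap(buf, len);
		return 0;
	}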
 
 Khugepaged controls
 -------------------
 
                        (1<<TRANSPARENT_HUGEPAGE_FLAG);
 }
 
-static inline bool hugepage_flags_enabled(void)
-{
-       /*
-        * We cover both the anon and the file-backed case here; we must return
-        * true if globally enabled, even when all anon sizes are set to never.
-        * So we don't need to look at huge_anon_orders_inherit.
-        */
-       return hugepage_global_enabled() ||
-              READ_ONCE(huge_anon_orders_always) ||
-              READ_ONCE(huge_anon_orders_madvise);
-}
-
 static inline int highest_order(unsigned long orders)
 {
        return fls_long(orders) - 1;
 
               test_bit(MMF_DISABLE_THP, &mm->flags);
 }
 
+static bool hugepage_pmd_enabled(void)
+{
+       /*
+        * We cover both the anon and the file-backed case here; file-backed
+        * hugepages, when configured in, are determined by the global control.
+        * Anon pmd-sized hugepages are determined by the pmd-size control, or
+        * by the global control when the pmd-size control is set to "inherit".
+        */
+       if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+           hugepage_global_enabled())
+               return true;
+       if (test_bit(PMD_ORDER, &huge_anon_orders_always))
+               return true;
+       if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
+               return true;
+       if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
+           hugepage_global_enabled())
+               return true;
+       return false;
+}
+
 void __khugepaged_enter(struct mm_struct *mm)
 {
        struct khugepaged_mm_slot *mm_slot;
                          unsigned long vm_flags)
 {
        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
-           hugepage_flags_enabled()) {
+           hugepage_pmd_enabled()) {
                if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
                                            PMD_ORDER))
                        __khugepaged_enter(vma->vm_mm);
 
 static int khugepaged_has_work(void)
 {
-       return !list_empty(&khugepaged_scan.mm_head) &&
-               hugepage_flags_enabled();
+       return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
 }
 
 static int khugepaged_wait_event(void)
                return;
        }
 
-       if (hugepage_flags_enabled())
+       if (hugepage_pmd_enabled())
                wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
 }
 
        int nr_zones = 0;
        unsigned long recommended_min;
 
-       if (!hugepage_flags_enabled()) {
+       if (!hugepage_pmd_enabled()) {
                calculate_min_free_kbytes();
                goto update_wmarks;
        }
        int err = 0;
 
        mutex_lock(&khugepaged_mutex);
-       if (hugepage_flags_enabled()) {
+       if (hugepage_pmd_enabled()) {
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
 void khugepaged_min_free_kbytes_update(void)
 {
        mutex_lock(&khugepaged_mutex);
-       if (hugepage_flags_enabled() && khugepaged_thread)
+       if (hugepage_pmd_enabled() && khugepaged_thread)
                set_recommended_min_free_kbytes();
        mutex_unlock(&khugepaged_mutex);
 }