        PGPGOUT,
        PGSCAN_KSWAPD,
        PGSCAN_DIRECT,
+       PGSCAN_KHUGEPAGED,
        PGSTEAL_KSWAPD,
        PGSTEAL_DIRECT,
+       PGSTEAL_KHUGEPAGED,
        PGFAULT,
        PGMAJFAULT,
        PGREFILL,
        /* Accumulated memory events */
        seq_buf_printf(&s, "pgscan %lu\n",
                       memcg_events(memcg, PGSCAN_KSWAPD) +
-                      memcg_events(memcg, PGSCAN_DIRECT));
+                      memcg_events(memcg, PGSCAN_DIRECT) +
+                      memcg_events(memcg, PGSCAN_KHUGEPAGED));
        seq_buf_printf(&s, "pgsteal %lu\n",
                       memcg_events(memcg, PGSTEAL_KSWAPD) +
-                      memcg_events(memcg, PGSTEAL_DIRECT));
+                      memcg_events(memcg, PGSTEAL_DIRECT) +
+                      memcg_events(memcg, PGSTEAL_KHUGEPAGED));
 
        for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
                if (memcg_vm_event_stat[i] == PGPGIN ||
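
For context, a standalone sketch of the userspace-visible effect
(illustrative only, not part of the patch; sample values are made up):
the khugepaged counts are folded into the existing "pgscan"/"pgsteal"
fields, so the memory.stat format that userspace parses is unchanged.

	#include <stdio.h>

	int main(void)
	{
		/* made-up per-source counts, standing in for memcg_events() */
		unsigned long scan_kswapd = 1000;
		unsigned long scan_direct = 200;
		unsigned long scan_khugepaged = 50;

		/* mirrors the seq_buf_printf() aggregation above */
		printf("pgscan %lu\n",
		       scan_kswapd + scan_direct + scan_khugepaged);
		return 0;
	}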
 
 #include <linux/shmem_fs.h>
 #include <linux/ctype.h>
 #include <linux/debugfs.h>
+#include <linux/khugepaged.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
                drop_slab_node(nid);
 }
 
+static int reclaimer_offset(void)
+{
+       BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
+                       PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD);
+       BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
+                       PGSCAN_DIRECT - PGSCAN_KSWAPD);
+       BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
+                       PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD);
+       BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
+                       PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD);
+
+       if (current_is_kswapd())
+               return 0;
+       if (current_is_khugepaged())
+               return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
+       return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
+}
+
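
A minimal standalone sketch of the layout trick reclaimer_offset()
relies on (illustrative names, not kernel code): the counters for each
reclaim source sit at the same stride in the event enum, so a single
base item plus a per-reclaimer offset indexes the right counter, and
the compile-time asserts catch any enum reordering.

	#include <assert.h>
	#include <stdio.h>

	enum demo_event {
		DEMO_STEAL_KSWAPD,
		DEMO_STEAL_DIRECT,
		DEMO_STEAL_KHUGEPAGED,
		DEMO_SCAN_KSWAPD,
		DEMO_SCAN_DIRECT,
		DEMO_SCAN_KHUGEPAGED,
		NR_DEMO_EVENTS,
	};

	static unsigned long events[NR_DEMO_EVENTS];

	/* stand-in for current_is_kswapd()/current_is_khugepaged() */
	enum reclaimer { KSWAPD, DIRECT, KHUGEPAGED };

	static int demo_reclaimer_offset(enum reclaimer who)
	{
		/* compile-time layout checks, like the BUILD_BUG_ONs above */
		static_assert(DEMO_STEAL_DIRECT - DEMO_STEAL_KSWAPD ==
			      DEMO_SCAN_DIRECT - DEMO_SCAN_KSWAPD, "layout");
		static_assert(DEMO_STEAL_KHUGEPAGED - DEMO_STEAL_KSWAPD ==
			      DEMO_SCAN_KHUGEPAGED - DEMO_SCAN_KSWAPD, "layout");

		switch (who) {
		case KSWAPD:
			return 0;
		case KHUGEPAGED:
			return DEMO_STEAL_KHUGEPAGED - DEMO_STEAL_KSWAPD;
		default:
			return DEMO_STEAL_DIRECT - DEMO_STEAL_KSWAPD;
		}
	}

	int main(void)
	{
		/* one call site covers every reclaimer, as in the patch */
		events[DEMO_SCAN_KSWAPD + demo_reclaimer_offset(KHUGEPAGED)] += 32;
		printf("khugepaged scans: %lu\n", events[DEMO_SCAN_KHUGEPAGED]);
		return 0;
	}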
 static inline int is_page_cache_freeable(struct folio *folio)
 {
        /*
                      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
                      &nr_succeeded);
 
-       if (current_is_kswapd())
-               __count_vm_events(PGDEMOTE_KSWAPD, nr_succeeded);
-       else
-               __count_vm_events(PGDEMOTE_DIRECT, nr_succeeded);
+       __count_vm_events(PGDEMOTE_KSWAPD + reclaimer_offset(), nr_succeeded);
 
        return nr_succeeded;
 }
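
Illustrative expansion, assuming khugepaged is the caller:
PGDEMOTE_KSWAPD + (PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD) ==
PGDEMOTE_KHUGEPAGED, which is exactly the invariant the khugepaged
PGDEMOTE BUILD_BUG_ON in reclaimer_offset() pins down at compile time.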
                                     &nr_scanned, sc, lru);
 
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
-       item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
+       item = PGSCAN_KSWAPD + reclaimer_offset();
        if (!cgroup_reclaim(sc))
                __count_vm_events(item, nr_scanned);
        __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
        move_folios_to_lru(lruvec, &folio_list);
 
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-       item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
+       item = PGSTEAL_KSWAPD + reclaimer_offset();
        if (!cgroup_reclaim(sc))
                __count_vm_events(item, nr_reclaimed);
        __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
                        break;
        }
 
-       item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
+       item = PGSCAN_KSWAPD + reclaimer_offset();
        if (!cgroup_reclaim(sc)) {
                __count_vm_events(item, isolated);
                __count_vm_events(PGREFILL, sorted);
        if (walk && walk->batched)
                reset_batch_size(lruvec, walk);
 
-       item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
+       item = PGSTEAL_KSWAPD + reclaimer_offset();
        if (!cgroup_reclaim(sc))
                __count_vm_events(item, reclaimed);
        __count_memcg_events(memcg, item, reclaimed);