}
 
 struct damon_young_walk_private {
-       unsigned long *page_sz;
+       /* size of the folio for the access checked virtual memory address */
+       unsigned long *folio_sz;
        bool young;
 };
 
                if (pmd_young(*pmd) || !folio_test_idle(folio) ||
                                        mmu_notifier_test_young(walk->mm,
                                                addr)) {
-                       *priv->page_sz = HPAGE_PMD_SIZE;
+                       *priv->folio_sz = HPAGE_PMD_SIZE;
                        priv->young = true;
                }
                folio_put(folio);
                goto out;
        if (pte_young(*pte) || !folio_test_idle(folio) ||
                        mmu_notifier_test_young(walk->mm, addr)) {
-               *priv->page_sz = PAGE_SIZE;
+               *priv->folio_sz = PAGE_SIZE;
                priv->young = true;
        }
        folio_put(folio);
 
        if (pte_young(entry) || !folio_test_idle(folio) ||
            mmu_notifier_test_young(walk->mm, addr)) {
-               *priv->page_sz = huge_page_size(h);
+               *priv->folio_sz = huge_page_size(h);
                priv->young = true;
        }
 
 };
 
 static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
-               unsigned long *page_sz)
+               unsigned long *folio_sz)
 {
        struct damon_young_walk_private arg = {
-               .page_sz = page_sz,
+               .folio_sz = folio_sz,
                .young = false,
        };
 
                                struct damon_region *r, bool same_target)
 {
        static unsigned long last_addr;
-       static unsigned long last_page_sz = PAGE_SIZE;
+       static unsigned long last_folio_sz = PAGE_SIZE;
        static bool last_accessed;
 
-       /* If the region is in the last checked folio, reuse the result */
+       /* If the region is in the last checked folio, reuse the result */
-       if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
-                               ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
+       if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
+                               ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
                if (last_accessed)
                        r->nr_accesses++;
                return;
        }
 
-       last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
+       last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
        if (last_accessed)
                r->nr_accesses++;