mm/damon/paddr: support addr_unit for DAMOS_LRU_[DE]PRIO
author SeongJae Park <sj@kernel.org>
Thu, 28 Aug 2025 17:12:35 +0000 (10:12 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:22 +0000 (17:25 -0700)
Add addr_unit support for DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO action
handling in the DAMOS operation implementation for the physical address
space.
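
The conversion relies on the damon_pa_phys_addr() and damon_pa_core_addr()
helpers introduced earlier in this series.  As a minimal sketch of their
assumed semantics (addr_unit scales DAMON core-layer addresses to physical
addresses; the bodies below are illustrative, inferred from how this patch
uses them, not copied from the series):

	/*
	 * Illustrative sketch only: the real helpers are added earlier
	 * in the addr_unit series.  addr_unit is the number of physical
	 * bytes represented by one core-layer address unit.
	 */
	static phys_addr_t damon_pa_phys_addr(
			unsigned long addr, unsigned long addr_unit)
	{
		/* core-layer address -> physical address */
		return (phys_addr_t)addr * addr_unit;
	}

	static unsigned long damon_pa_core_addr(
			phys_addr_t pa, unsigned long addr_unit)
	{
		/* physical address -> core-layer address */
		return pa / addr_unit;
	}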

Link: https://lkml.kernel.org/r/20250828171242.59810-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: ze zuo <zuoze1@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/damon/paddr.c

index 696aeb0f6c8edd439951621cdda47e6e1cb5428d..2cd1c9953983c7a79853c78ae51ab46a12555b61 100644
@@ -207,14 +207,15 @@ put_folio:
 }
 
 static inline unsigned long damon_pa_mark_accessed_or_deactivate(
-               struct damon_region *r, struct damos *s, bool mark_accessed,
+               struct damon_region *r, unsigned long addr_unit,
+               struct damos *s, bool mark_accessed,
                unsigned long *sz_filter_passed)
 {
-       unsigned long addr, applied = 0;
+       phys_addr_t addr, applied = 0;
        struct folio *folio;
 
-       addr = r->ar.start;
-       while (addr < r->ar.end) {
+       addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+       while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
                folio = damon_get_folio(PHYS_PFN(addr));
                if (damon_pa_invalid_damos_folio(folio, s)) {
                        addr += PAGE_SIZE;
@@ -224,7 +225,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
                if (damos_pa_filter_out(s, folio))
                        goto put_folio;
                else
-                       *sz_filter_passed += folio_size(folio);
+                       *sz_filter_passed += folio_size(folio) / addr_unit;
 
                if (mark_accessed)
                        folio_mark_accessed(folio);
@@ -236,20 +237,22 @@ put_folio:
                folio_put(folio);
        }
        s->last_applied = folio;
-       return applied * PAGE_SIZE;
+       return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
 }
 
 static unsigned long damon_pa_mark_accessed(struct damon_region *r,
-       struct damos *s, unsigned long *sz_filter_passed)
+               unsigned long addr_unit, struct damos *s,
+               unsigned long *sz_filter_passed)
 {
-       return damon_pa_mark_accessed_or_deactivate(r, s, true,
+       return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, true,
                        sz_filter_passed);
 }
 
 static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
-       struct damos *s, unsigned long *sz_filter_passed)
+               unsigned long addr_unit, struct damos *s,
+               unsigned long *sz_filter_passed)
 {
-       return damon_pa_mark_accessed_or_deactivate(r, s, false,
+       return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, false,
                        sz_filter_passed);
 }
 
@@ -322,9 +325,11 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
        case DAMOS_PAGEOUT:
                return damon_pa_pageout(r, aunit, scheme, sz_filter_passed);
        case DAMOS_LRU_PRIO:
-               return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
+               return damon_pa_mark_accessed(r, aunit, scheme,
+                               sz_filter_passed);
        case DAMOS_LRU_DEPRIO:
-               return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
+               return damon_pa_deactivate_pages(r, aunit, scheme,
+                               sz_filter_passed);
        case DAMOS_MIGRATE_HOT:
        case DAMOS_MIGRATE_COLD:
                return damon_pa_migrate(r, scheme, sz_filter_passed);
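
As a worked example of the unit handling above (illustrative numbers): with
addr_unit == 4 and a 4 KiB PAGE_SIZE, one filtered-in 4 KiB folio adds
folio_size(folio) / addr_unit == 1024 to *sz_filter_passed, and one applied
page is reported back as damon_pa_core_addr(1 * PAGE_SIZE, 4) == 1024.  Both
counters thus stay in core-layer address units, while the loop cursor
(phys_addr_t addr) walks physical addresses that may exceed the core layer's
unsigned long range.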