mm/damon/paddr: support addr_unit for DAMOS_PAGEOUT
author SeongJae Park <sj@kernel.org>
Thu, 28 Aug 2025 17:12:34 +0000 (10:12 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:22 +0000 (17:25 -0700)
Add addr_unit support to DAMOS_PAGEOUT action handling in the DAMON
operations set implementation for the physical address space.
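The conversion between DAMON's core address space and physical addresses
is plain scaling by addr_unit.  The user-space sketch below illustrates
the idea; the helper names mirror damon_pa_phys_addr() and
damon_pa_core_addr() from the patch but are otherwise illustrative only,
and plain division stands in for the kernel's div_u64():

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;  /* may be wider than unsigned long */

    /* core (monitoring) address -> physical address */
    static phys_addr_t pa_from_core(unsigned long addr,
                                    unsigned long addr_unit)
    {
            return (phys_addr_t)addr * addr_unit;
    }

    /*
     * physical address -> core address; the kernel uses div_u64() here
     * when phys_addr_t is 64-bit and unsigned long is 32-bit
     */
    static unsigned long core_from_pa(phys_addr_t pa,
                                      unsigned long addr_unit)
    {
            return pa / addr_unit;
    }

    int main(void)
    {
            unsigned long addr_unit = 16;   /* illustrative scale factor */
            unsigned long core = 0x100000;  /* a core address space address */
            phys_addr_t pa = pa_from_core(core, addr_unit);

            printf("core 0x%lx -> phys 0x%llx -> core 0x%lx\n",
                   core, (unsigned long long)pa,
                   core_from_pa(pa, addr_unit));
            return 0;
    }

The scaling lets 32-bit kernels whose physical addresses can exceed the
range of unsigned long still express them in DAMON's core address space.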

Link: https://lkml.kernel.org/r/20250828171242.59810-4-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: ze zuo <zuoze1@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/damon/paddr.c

index d497373c2bd2a8deeb2bb7682bcad840664d5537..696aeb0f6c8edd439951621cdda47e6e1cb5428d 100644
@@ -24,6 +24,19 @@ static phys_addr_t damon_pa_phys_addr(
        return (phys_addr_t)addr * addr_unit;
 }
 
+static unsigned long damon_pa_core_addr(
+               phys_addr_t pa, unsigned long addr_unit)
+{
+       /*
+        * Use div_u64() to avoid link errors related to __udivdi3,
+        * __aeabi_uldivmod, or similar problems on 32-bit builds.  It should
+        * also be faster there (see the comment of div_u64() for details).
+        */
+       if (sizeof(pa) == 8 && sizeof(addr_unit) == 4)
+               return div_u64(pa, addr_unit);
+       return pa / addr_unit;
+}
+
 static void damon_pa_mkold(phys_addr_t paddr)
 {
        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
@@ -135,10 +148,11 @@ static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
        return false;
 }
 
-static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
+static unsigned long damon_pa_pageout(struct damon_region *r,
+               unsigned long addr_unit, struct damos *s,
                unsigned long *sz_filter_passed)
 {
-       unsigned long addr, applied;
+       phys_addr_t addr, applied;
        LIST_HEAD(folio_list);
        bool install_young_filter = true;
        struct damos_filter *filter;
@@ -159,8 +173,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
                damos_add_filter(s, filter);
        }
 
-       addr = r->ar.start;
-       while (addr < r->ar.end) {
+       addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+       while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
                folio = damon_get_folio(PHYS_PFN(addr));
                if (damon_pa_invalid_damos_folio(folio, s)) {
                        addr += PAGE_SIZE;
@@ -170,7 +184,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
                if (damos_pa_filter_out(s, folio))
                        goto put_folio;
                else
-                       *sz_filter_passed += folio_size(folio);
+                       *sz_filter_passed += folio_size(folio) / addr_unit;
 
                folio_clear_referenced(folio);
                folio_test_clear_young(folio);
@@ -189,7 +203,7 @@ put_folio:
        applied = reclaim_pages(&folio_list);
        cond_resched();
        s->last_applied = folio;
-       return applied * PAGE_SIZE;
+       return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
 }
 
 static inline unsigned long damon_pa_mark_accessed_or_deactivate(
@@ -302,9 +316,11 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme, unsigned long *sz_filter_passed)
 {
+       unsigned long aunit = ctx->addr_unit;
+
        switch (scheme->action) {
        case DAMOS_PAGEOUT:
-               return damon_pa_pageout(r, scheme, sz_filter_passed);
+               return damon_pa_pageout(r, aunit, scheme, sz_filter_passed);
        case DAMOS_LRU_PRIO:
                return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
        case DAMOS_LRU_DEPRIO: