www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/zsmalloc: add two helpers for zs_page_migrate() and make it use zpdesc
author: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Mon, 16 Dec 2024 15:04:41 +0000 (00:04 +0900)
committer: Andrew Morton <akpm@linux-foundation.org>
Sun, 26 Jan 2025 04:22:34 +0000 (20:22 -0800)
To convert page to zpdesc in zs_page_migrate(), we added
zpdesc_is_isolated()/zpdesc_zone() helpers.  No functional change.
Link: https://lkml.kernel.org/r/20241216150450.1228021-11-42.hyeyoo@gmail.com
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/zpdesc.h
mm/zsmalloc.c

index cbd37d9725c7cd332007b037dc4067dbd0a46f96..193d40226188b408d51da9887bfd8d8fa7a22926 100644 (file)
@@ -154,4 +154,15 @@ static inline void __zpdesc_set_movable(struct zpdesc *zpdesc,
 {
        __SetPageMovable(zpdesc_page(zpdesc), mops);
 }
+
+static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc)
+{
+       return PageIsolated(zpdesc_page(zpdesc));
+}
+
+static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
+{
+       return page_zone(zpdesc_page(zpdesc));
+}
+
 #endif
index 112603f9449fd5408be8c1b6408c6d1c7f00a379..432e78e61d2e3c6080d284c31f9e8b89d7b5b4ba 100644 (file)
@@ -1796,19 +1796,21 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
        struct size_class *class;
        struct zspage *zspage;
        struct zpdesc *dummy;
+       struct zpdesc *newzpdesc = page_zpdesc(newpage);
+       struct zpdesc *zpdesc = page_zpdesc(page);
        void *s_addr, *d_addr, *addr;
        unsigned int offset;
        unsigned long handle;
        unsigned long old_obj, new_obj;
        unsigned int obj_idx;
 
-       VM_BUG_ON_PAGE(!PageIsolated(page), page);
+       VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
 
        /* We're committed, tell the world that this is a Zsmalloc page. */
-       __SetPageZsmalloc(newpage);
+       __SetPageZsmalloc(zpdesc_page(newzpdesc));
 
        /* The page is locked, so this pointer must remain valid */
-       zspage = get_zspage(page);
+       zspage = get_zspage(zpdesc_page(zpdesc));
        pool = zspage->pool;
 
        /*
@@ -1825,30 +1827,30 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
        /* the migrate_write_lock protects zpage access via zs_map_object */
        migrate_write_lock(zspage);
 
-       offset = get_first_obj_offset(page);
-       s_addr = kmap_local_page(page);
+       offset = get_first_obj_offset(zpdesc_page(zpdesc));
+       s_addr = kmap_local_zpdesc(zpdesc);
 
        /*
         * Here, any user cannot access all objects in the zspage so let's move.
         */
-       d_addr = kmap_local_page(newpage);
+       d_addr = kmap_local_zpdesc(newzpdesc);
        copy_page(d_addr, s_addr);
        kunmap_local(d_addr);
 
        for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
                                        addr += class->size) {
-               if (obj_allocated(page_zpdesc(page), addr, &handle)) {
+               if (obj_allocated(zpdesc, addr, &handle)) {
 
                        old_obj = handle_to_obj(handle);
                        obj_to_location(old_obj, &dummy, &obj_idx);
-                       new_obj = (unsigned long)location_to_obj(newpage,
+                       new_obj = (unsigned long)location_to_obj(zpdesc_page(newzpdesc),
                                                                obj_idx);
                        record_obj(handle, new_obj);
                }
        }
        kunmap_local(s_addr);
 
-       replace_sub_page(class, zspage, page_zpdesc(newpage), page_zpdesc(page));
+       replace_sub_page(class, zspage, newzpdesc, zpdesc);
        /*
         * Since we complete the data copy and set up new zspage structure,
         * it's okay to release migration_lock.
@@ -1857,14 +1859,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
        spin_unlock(&class->lock);
        migrate_write_unlock(zspage);
 
-       get_page(newpage);
-       if (page_zone(newpage) != page_zone(page)) {
-               dec_zone_page_state(page, NR_ZSPAGES);
-               inc_zone_page_state(newpage, NR_ZSPAGES);
+       zpdesc_get(newzpdesc);
+       if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
+               zpdesc_dec_zone_page_state(zpdesc);
+               zpdesc_inc_zone_page_state(newzpdesc);
        }
 
        reset_page(page);
-       put_page(page);
+       zpdesc_put(zpdesc);
 
        return MIGRATEPAGE_SUCCESS;
 }