mm/zsmalloc: move record_obj() into obj_malloc()
author Chengming Zhou <chengming.zhou@linux.dev>
Thu, 27 Jun 2024 07:59:59 +0000 (15:59 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Jul 2024 22:52:13 +0000 (15:52 -0700)
We always call record_obj() to make the handle point to the object after
obj_malloc(), so simplify the code by moving record_obj() into obj_malloc().
There should be no functional change.  (A short sketch of the before/after
call pattern follows the sign-off tags below.)

Link: https://lkml.kernel.org/r/20240627075959.611783-2-chengming.zhou@linux.dev
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
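
For context, record_obj() stores the object's encoded location into the slot
that the handle addresses, so after obj_malloc() returns the handle already
points at the freshly allocated object.  A minimal sketch of the before/after
call pattern, with record_obj() paraphrased from its definition elsewhere in
mm/zsmalloc.c (the exact body there may differ, e.g. it may use WRITE_ONCE()):

    /* approximate shape of record_obj() in mm/zsmalloc.c */
    static void record_obj(unsigned long handle, unsigned long obj)
    {
            /* the handle is the address of a slot holding the object value */
            *(unsigned long *)handle = obj;
    }

    /* before this patch: every caller recorded the handle itself */
    obj = obj_malloc(pool, zspage, handle);
    record_obj(handle, obj);

    /* after this patch: obj_malloc() records the handle before returning */
    obj_malloc(pool, zspage, handle);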
mm/zsmalloc.c

index 7fc25fa4e6b348bbf7b3b900dcea1dee1fbae19f..c2f4e62ffb46c7394e444eecfeb86c222494b130 100644
@@ -1306,7 +1306,6 @@ static unsigned long obj_malloc(struct zs_pool *pool,
        void *vaddr;
 
        class = pool->size_class[zspage->class];
-       handle |= OBJ_ALLOCATED_TAG;
        obj = get_freeobj(zspage);
 
        offset = obj * class->size;
@@ -1322,15 +1321,16 @@ static unsigned long obj_malloc(struct zs_pool *pool,
        set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
        if (likely(!ZsHugePage(zspage)))
                /* record handle in the header of allocated chunk */
-               link->handle = handle;
+               link->handle = handle | OBJ_ALLOCATED_TAG;
        else
                /* record handle to page->index */
-               zspage->first_page->index = handle;
+               zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
 
        kunmap_atomic(vaddr);
        mod_zspage_inuse(zspage, 1);
 
        obj = location_to_obj(m_page, obj);
+       record_obj(handle, obj);
 
        return obj;
 }
@@ -1348,7 +1348,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
  */
 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 {
-       unsigned long handle, obj;
+       unsigned long handle;
        struct size_class *class;
        int newfg;
        struct zspage *zspage;
@@ -1371,10 +1371,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
        spin_lock(&class->lock);
        zspage = find_get_zspage(class);
        if (likely(zspage)) {
-               obj = obj_malloc(pool, zspage, handle);
+               obj_malloc(pool, zspage, handle);
                /* Now move the zspage to another fullness group, if required */
                fix_fullness_group(class, zspage);
-               record_obj(handle, obj);
                class_stat_inc(class, ZS_OBJS_INUSE, 1);
 
                goto out;
@@ -1389,10 +1388,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
        }
 
        spin_lock(&class->lock);
-       obj = obj_malloc(pool, zspage, handle);
+       obj_malloc(pool, zspage, handle);
        newfg = get_fullness_group(class, zspage);
        insert_zspage(class, zspage, newfg);
-       record_obj(handle, obj);
        atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
        class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
        class_stat_inc(class, ZS_OBJS_INUSE, 1);
@@ -1591,7 +1589,6 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
                free_obj = obj_malloc(pool, dst_zspage, handle);
                zs_object_copy(class, free_obj, used_obj);
                obj_idx++;
-               record_obj(handle, free_obj);
                obj_free(class->size, used_obj);
 
                /* Stop if there is no more space */