zsmalloc: rename pool lock
author     Sergey Senozhatsky <senozhatsky@chromium.org>
Fri, 21 Feb 2025 22:25:41 +0000 (07:25 +0900)
committer  Andrew Morton <akpm@linux-foundation.org>
Fri, 28 Feb 2025 01:00:24 +0000 (17:00 -0800)
The old name dates back to the times when the pool did not have compaction
(defragmentation).  Rename it to ->lock, because these days it synchronizes
more than just migration.

Link: https://lkml.kernel.org/r/20250221222958.2225035-11-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
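
For reference, a minimal sketch of how the renamed pool->lock nests with the
per-class lock when a handle is resolved.  This is not the actual
mm/zsmalloc.c code: the helpers (handle_to_obj(), obj_to_zpdesc(),
get_zspage(), zspage_class()) and the lock/unlock sequence are taken from the
zs_free() hunk below; everything else is illustrative only:

    /*
     * pool->lock (formerly pool->migrate_lock) keeps the zspage stable
     * against migration/compaction while the handle is translated.
     */
    read_lock(&pool->lock);
    obj = handle_to_obj(handle);
    obj_to_zpdesc(obj, &zpdesc);
    zspage = get_zspage(zpdesc);
    class = zspage_class(pool, zspage);
    /* switch to the finer-grained per-class lock, then drop pool->lock */
    spin_lock(&class->lock);
    read_unlock(&pool->lock);
    /* ... operate on the object under class->lock ... */
    spin_unlock(&class->lock);

The nesting follows the lock ordering documented at the top of the file:
pool->lock is taken before class->lock and released as soon as the per-class
lock is held.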
mm/zsmalloc.c

index 6d0e47f7ae33967adabadc75795593d1b3771059..2e338cde0d2133e720c9c33ac8917779a250db28 100644 (file)
@@ -18,7 +18,7 @@
 /*
  * lock ordering:
  *     page_lock
- *     pool->migrate_lock
+ *     pool->lock
  *     class->lock
  *     zspage->lock
  */
@@ -223,8 +223,8 @@ struct zs_pool {
 #ifdef CONFIG_COMPACTION
        struct work_struct free_work;
 #endif
-       /* protect page/zspage migration */
-       rwlock_t migrate_lock;
+       /* protect zspage migration/compaction */
+       rwlock_t lock;
        atomic_t compaction_in_progress;
 };
 
@@ -1206,7 +1206,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
        BUG_ON(in_interrupt());
 
        /* It guarantees it can get zspage from handle safely */
-       read_lock(&pool->migrate_lock);
+       read_lock(&pool->lock);
        obj = handle_to_obj(handle);
        obj_to_location(obj, &zpdesc, &obj_idx);
        zspage = get_zspage(zpdesc);
@@ -1218,7 +1218,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
         * which is smaller granularity.
         */
        migrate_read_lock(zspage);
-       read_unlock(&pool->migrate_lock);
+       read_unlock(&pool->lock);
 
        class = zspage_class(pool, zspage);
        off = offset_in_page(class->size * obj_idx);
@@ -1450,16 +1450,16 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
                return;
 
        /*
-        * The pool->migrate_lock protects the race with zpage's migration
+        * The pool->lock protects the race with zpage's migration
         * so it's safe to get the page from handle.
         */
-       read_lock(&pool->migrate_lock);
+       read_lock(&pool->lock);
        obj = handle_to_obj(handle);
        obj_to_zpdesc(obj, &f_zpdesc);
        zspage = get_zspage(f_zpdesc);
        class = zspage_class(pool, zspage);
        spin_lock(&class->lock);
-       read_unlock(&pool->migrate_lock);
+       read_unlock(&pool->lock);
 
        class_stat_sub(class, ZS_OBJS_INUSE, 1);
        obj_free(class->size, obj);
@@ -1796,7 +1796,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
         * The pool migrate_lock protects the race between zpage migration
         * and zs_free.
         */
-       write_lock(&pool->migrate_lock);
+       write_lock(&pool->lock);
        class = zspage_class(pool, zspage);
 
        /*
@@ -1833,7 +1833,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
         * Since we complete the data copy and set up new zspage structure,
         * it's okay to release migration_lock.
         */
-       write_unlock(&pool->migrate_lock);
+       write_unlock(&pool->lock);
        spin_unlock(&class->lock);
        migrate_write_unlock(zspage);
 
@@ -1956,7 +1956,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
         * protect the race between zpage migration and zs_free
         * as well as zpage allocation/free
         */
-       write_lock(&pool->migrate_lock);
+       write_lock(&pool->lock);
        spin_lock(&class->lock);
        while (zs_can_compact(class)) {
                int fg;
@@ -1983,14 +1983,14 @@ static unsigned long __zs_compact(struct zs_pool *pool,
                src_zspage = NULL;
 
                if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
-                   || rwlock_is_contended(&pool->migrate_lock)) {
+                   || rwlock_is_contended(&pool->lock)) {
                        putback_zspage(class, dst_zspage);
                        dst_zspage = NULL;
 
                        spin_unlock(&class->lock);
-                       write_unlock(&pool->migrate_lock);
+                       write_unlock(&pool->lock);
                        cond_resched();
-                       write_lock(&pool->migrate_lock);
+                       write_lock(&pool->lock);
                        spin_lock(&class->lock);
                }
        }
@@ -2002,7 +2002,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
                putback_zspage(class, dst_zspage);
 
        spin_unlock(&class->lock);
-       write_unlock(&pool->migrate_lock);
+       write_unlock(&pool->lock);
 
        return pages_freed;
 }
@@ -2014,10 +2014,10 @@ unsigned long zs_compact(struct zs_pool *pool)
        unsigned long pages_freed = 0;
 
        /*
-        * Pool compaction is performed under pool->migrate_lock so it is basically
+        * Pool compaction is performed under pool->lock so it is basically
         * single-threaded. Having more than one thread in __zs_compact()
-        * will increase pool->migrate_lock contention, which will impact other
-        * zsmalloc operations that need pool->migrate_lock.
+        * will increase pool->lock contention, which will impact other
+        * zsmalloc operations that need pool->lock.
         */
        if (atomic_xchg(&pool->compaction_in_progress, 1))
                return 0;
@@ -2139,7 +2139,7 @@ struct zs_pool *zs_create_pool(const char *name)
                return NULL;
 
        init_deferred_free(pool);
-       rwlock_init(&pool->migrate_lock);
+       rwlock_init(&pool->lock);
        atomic_set(&pool->compaction_in_progress, 0);
 
        pool->name = kstrdup(name, GFP_KERNEL);
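
The rwlock_is_contended() back-off in __zs_compact() keeps the pool->lock
writer from starving readers such as zs_map_object() and zs_free().  A
condensed sketch of that loop after the rename, with the migration details
and the fullness check elided (only the lock/unlock sequence is taken from
the __zs_compact() hunk above, the rest is illustrative):

    write_lock(&pool->lock);
    spin_lock(&class->lock);
    while (zs_can_compact(class)) {
            /* ... pick src/dst zspages and migrate objects ... */
            if (rwlock_is_contended(&pool->lock)) {
                    /*
                     * Drop both locks so readers can make progress, then
                     * retake them in the documented order:
                     * pool->lock first, class->lock second.
                     */
                    spin_unlock(&class->lock);
                    write_unlock(&pool->lock);
                    cond_resched();
                    write_lock(&pool->lock);
                    spin_lock(&class->lock);
            }
    }
    spin_unlock(&class->lock);
    write_unlock(&pool->lock);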