/*
  * lock ordering:
  *     page_lock
- *     pool->migrate_lock
+ *     pool->lock
  *     class->lock
  *     zspage->lock
  */
 #ifdef CONFIG_COMPACTION
        struct work_struct free_work;
 #endif
-       /* protect page/zspage migration */
-       rwlock_t migrate_lock;
+       /* protect zspage migration/compaction */
+       rwlock_t lock;
        atomic_t compaction_in_progress;
 };
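
Taken together, the hierarchy reads: pool->lock is the coarsest lock, class->lock nests inside it, and zspage->lock is the finest. Below is a minimal userspace sketch of that discipline, using pthreads and hypothetical names rather than the kernel API. Any path taking more than one level must acquire them in this order; only acquisition order matters for deadlock avoidance, which is why the release sequences in the hunks below are free to unlock in any order.

#include <pthread.h>

/* Hypothetical userspace stand-ins for the three lock levels. */
struct pool       { pthread_rwlock_t lock; };
struct size_class { pthread_mutex_t  lock; };
struct zspage     { pthread_rwlock_t lock; };

static struct pool       pool   = { PTHREAD_RWLOCK_INITIALIZER };
static struct size_class class0 = { PTHREAD_MUTEX_INITIALIZER };
static struct zspage     page0  = { PTHREAD_RWLOCK_INITIALIZER };

/*
 * Acquire in the documented order: pool->lock, then class->lock,
 * then zspage->lock.  Unlock order is irrelevant; only a globally
 * consistent acquisition order prevents ABBA deadlocks.
 */
static void touch_object(void)
{
        pthread_rwlock_rdlock(&pool.lock);   /* coarsest level     */
        pthread_mutex_lock(&class0.lock);    /* per size-class     */
        pthread_rwlock_rdlock(&page0.lock);  /* finest: one zspage */

        /* ... operate on the object here ... */

        pthread_rwlock_unlock(&page0.lock);
        pthread_mutex_unlock(&class0.lock);
        pthread_rwlock_unlock(&pool.lock);
}

int main(void)
{
        touch_object();
        return 0;
}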
 
        BUG_ON(in_interrupt());
 
-       /* It guarantees it can get zspage from handle safely */
+       /* Guarantee that the zspage can be obtained from the handle safely */
-       read_lock(&pool->migrate_lock);
+       read_lock(&pool->lock);
        obj = handle_to_obj(handle);
        obj_to_location(obj, &zpdesc, &obj_idx);
        zspage = get_zspage(zpdesc);
         * which is smaller granularity.
         */
        migrate_read_lock(zspage);
-       read_unlock(&pool->migrate_lock);
+       read_unlock(&pool->lock);
 
        class = zspage_class(pool, zspage);
        off = offset_in_page(class->size * obj_idx);
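
This hunk is the read-side hand-over-hand pattern: pool->lock is held just long enough to resolve the handle to a stable zspage, the finer-grained zspage lock is taken, and only then is pool->lock dropped, leaving no window in which migration could move or free the zspage. A hedged userspace sketch of the same handoff (hypothetical names, not the kernel API):

#include <pthread.h>

static pthread_rwlock_t pool_lock = PTHREAD_RWLOCK_INITIALIZER;

struct zspage {
        pthread_rwlock_t lock;
};

static struct zspage one_page = { PTHREAD_RWLOCK_INITIALIZER };

/* Hypothetical stand-in for the handle_to_obj()/get_zspage() walk. */
static struct zspage *lookup_zspage(unsigned long handle)
{
        (void)handle;
        return &one_page;
}

/*
 * Hand over hand: pool_lock pins the handle->zspage mapping just long
 * enough to take the finer-grained per-zspage lock, so migration can
 * never slip in between the two acquisitions.
 */
static struct zspage *pin_zspage(unsigned long handle)
{
        struct zspage *zspage;

        pthread_rwlock_rdlock(&pool_lock);
        zspage = lookup_zspage(handle);
        pthread_rwlock_rdlock(&zspage->lock);
        pthread_rwlock_unlock(&pool_lock);

        return zspage;  /* caller drops zspage->lock when done */
}

int main(void)
{
        struct zspage *z = pin_zspage(0);
        pthread_rwlock_unlock(&z->lock);
        return 0;
}
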
                return;
 
        /*
-        * The pool->migrate_lock protects the race with zpage's migration
+        * The pool->lock protects the race with zpage's migration
         * so it's safe to get the page from handle.
         */
-       read_lock(&pool->migrate_lock);
+       read_lock(&pool->lock);
        obj = handle_to_obj(handle);
        obj_to_zpdesc(obj, &f_zpdesc);
        zspage = get_zspage(f_zpdesc);
        class = zspage_class(pool, zspage);
        spin_lock(&class->lock);
-       read_unlock(&pool->migrate_lock);
+       read_unlock(&pool->lock);
 
        class_stat_sub(class, ZS_OBJS_INUSE, 1);
        obj_free(class->size, obj);
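
The free path performs the same handoff onto the per-class lock: class->lock is taken before pool->lock is released, so the class free list and statistics are updated under the narrower lock while the pool-wide critical section stays short. In rough userspace terms (hypothetical names, mutex standing in for the spinlock):

#include <pthread.h>

static pthread_rwlock_t pool_lock  = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t  class_lock = PTHREAD_MUTEX_INITIALIZER;
static long objs_inuse = 1;     /* protected by class_lock */

static void free_object(void)
{
        pthread_rwlock_rdlock(&pool_lock);
        /* ... resolve the handle while migration is excluded ... */
        pthread_mutex_lock(&class_lock);
        pthread_rwlock_unlock(&pool_lock);  /* shrink the pool-wide section */

        objs_inuse--;                       /* class_stat_sub() analogue */
        /* ... put the object back on the class free list ... */

        pthread_mutex_unlock(&class_lock);
}

int main(void)
{
        free_object();
        return 0;
}
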
-        * The pool migrate_lock protects the race between zpage migration
+        * The pool->lock protects the race between zpage migration
         * and zs_free.
         */
-       write_lock(&pool->migrate_lock);
+       write_lock(&pool->lock);
        class = zspage_class(pool, zspage);
 
        /*
-        * Since we complete the data copy and set up new zspage structure,
-        * it's okay to release migration_lock.
+        * Since the data copy is complete and the new zspage structure is
+        * set up, it's okay to release pool->lock.
         */
-       write_unlock(&pool->migrate_lock);
+       write_unlock(&pool->lock);
        spin_unlock(&class->lock);
        migrate_write_unlock(zspage);
 
         * protect the race between zpage migration and zs_free
         * as well as zpage allocation/free
         */
-       write_lock(&pool->migrate_lock);
+       write_lock(&pool->lock);
        spin_lock(&class->lock);
        while (zs_can_compact(class)) {
                int fg;
                src_zspage = NULL;
 
                if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
-                   || rwlock_is_contended(&pool->migrate_lock)) {
+                   || rwlock_is_contended(&pool->lock)) {
                        putback_zspage(class, dst_zspage);
                        dst_zspage = NULL;
 
                        spin_unlock(&class->lock);
-                       write_unlock(&pool->migrate_lock);
+                       write_unlock(&pool->lock);
                        cond_resched();
-                       write_lock(&pool->migrate_lock);
+                       write_lock(&pool->lock);
                        spin_lock(&class->lock);
                }
        }
                putback_zspage(class, dst_zspage);
 
        spin_unlock(&class->lock);
-       write_unlock(&pool->migrate_lock);
+       write_unlock(&pool->lock);
 
        return pages_freed;
 }
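
The compaction loop bounds how long the write lock is held: when the destination zspage is full or rwlock_is_contended() reports waiters, both locks are dropped, cond_resched() runs, and the locks are retaken. Userspace has no rwlock_is_contended(), so the sketch below can only approximate the shape with a periodic lock break (all names hypothetical):

#include <pthread.h>
#include <sched.h>

static pthread_rwlock_t pool_lock = PTHREAD_RWLOCK_INITIALIZER;

static int work_remaining(void) { static int n = 1000; return n-- > 0; }

static void compact(void)
{
        int batch = 0;

        pthread_rwlock_wrlock(&pool_lock);
        while (work_remaining()) {
                /* ... migrate one object ... */

                /*
                 * Lock break: periodically drop the write lock and
                 * yield so readers are not starved for the whole
                 * compaction run.  The kernel version breaks only
                 * when rwlock_is_contended() says someone is waiting.
                 */
                if (++batch == 32) {
                        batch = 0;
                        pthread_rwlock_unlock(&pool_lock);
                        sched_yield();   /* cond_resched() analogue */
                        pthread_rwlock_wrlock(&pool_lock);
                }
        }
        pthread_rwlock_unlock(&pool_lock);
}

int main(void)
{
        compact();
        return 0;
}
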
        unsigned long pages_freed = 0;
 
        /*
-        * Pool compaction is performed under pool->migrate_lock so it is basically
+        * Pool compaction is performed under pool->lock so it is basically
         * single-threaded. Having more than one thread in __zs_compact()
-        * will increase pool->migrate_lock contention, which will impact other
-        * zsmalloc operations that need pool->migrate_lock.
+        * will increase pool->lock contention, which will impact other
+        * zsmalloc operations that need pool->lock.
         */
        if (atomic_xchg(&pool->compaction_in_progress, 1))
                return 0;
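
The atomic_xchg() guard is a single-flight pattern: the first caller swaps the flag from 0 to 1 and proceeds, while any concurrent caller sees 1 come back and bails out immediately instead of piling onto pool->lock. A C11 equivalent:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int compaction_in_progress;

static long compact_pool(void)
{
        /* First caller swaps 0 -> 1 and runs; everyone else backs off. */
        if (atomic_exchange(&compaction_in_progress, 1))
                return 0;

        long pages_freed = 0;
        /* ... the actual compaction work would run here ... */

        atomic_store(&compaction_in_progress, 0);
        return pages_freed;
}

int main(void)
{
        printf("freed %ld pages\n", compact_pool());
        return 0;
}
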
                return NULL;
 
        init_deferred_free(pool);
-       rwlock_init(&pool->migrate_lock);
+       rwlock_init(&pool->lock);
        atomic_set(&pool->compaction_in_progress, 0);
 
        pool->name = kstrdup(name, GFP_KERNEL);