static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
-       unsigned long nr_migrated;
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta;
 
        }
 
        meta = zram->meta;
-       nr_migrated = zs_compact(meta->mem_pool);
-       atomic64_add(nr_migrated, &zram->stats.num_migrated);
+       zs_compact(meta->mem_pool);
        up_read(&zram->init_lock);
 
        return len;
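
The accounting removed above does not simply disappear: the counter moves out of zram and into zsmalloc's pool-wide statistics, which mm_stat_show() below reads back through the new zs_pool_stats() accessor. A condensed before/after sketch of the relationship (identifiers taken from this patch):

	/* before: zram accumulated the count itself */
	atomic64_add(zs_compact(meta->mem_pool), &zram->stats.num_migrated);

	/* after: zsmalloc accumulates internally; zram only snapshots */
	zs_compact(meta->mem_pool);
	zs_pool_stats(meta->mem_pool, &pool_stats);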
                struct device_attribute *attr, char *buf)
 {
        struct zram *zram = dev_to_zram(dev);
+       struct zs_pool_stats pool_stats;
        u64 orig_size, mem_used = 0;
        long max_used;
        ssize_t ret;
 
+       memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
+
        down_read(&zram->init_lock);
-       if (init_done(zram))
+       if (init_done(zram)) {
                mem_used = zs_get_total_pages(zram->meta->mem_pool);
+               zs_pool_stats(zram->meta->mem_pool, &pool_stats);
+       }
 
        orig_size = atomic64_read(&zram->stats.pages_stored);
        max_used = atomic_long_read(&zram->stats.max_used_pages);
 
        ret = scnprintf(buf, PAGE_SIZE,
-                       "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
+                       "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
                        orig_size << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.compr_data_size),
                        mem_used << PAGE_SHIFT,
                        zram->limit_pages << PAGE_SHIFT,
                        max_used << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.zero_pages),
-                       (u64)atomic64_read(&zram->stats.num_migrated));
+                       pool_stats.num_migrated);
        up_read(&zram->init_lock);
 
        return ret;
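
The struct zs_pool_stats type and the zs_pool_stats() accessor used above come from the zsmalloc half of this patch; the include/linux/zsmalloc.h hunk is not part of this excerpt, but presumably amounts to:

	struct zs_pool_stats {
		/* How many objects were migrated */
		unsigned long num_migrated;
	};

	void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);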
 
        gfp_t flags;    /* allocation flags used when growing pool */
        atomic_long_t pages_allocated;
 
+       struct zs_pool_stats stats;
 #ifdef CONFIG_ZSMALLOC_STAT
        struct dentry *stat_dentry;
 #endif
         /* Starting object index within @s_page which used for live object
          * in the subpage. */
        int index;
-       /* how many of objects are migrated */
+       /* How many objects were migrated */
        int nr_migrated;
 };
 
        struct page *s_page = cc->s_page;
        struct page *d_page = cc->d_page;
        unsigned long index = cc->index;
-       int nr_migrated = 0;
        int ret = 0;
 
        while (1) {
                record_obj(handle, free_obj);
                unpin_tag(handle);
                obj_free(pool, class, used_obj);
-               nr_migrated++;
+               cc->nr_migrated++;
        }
 
        /* Remember last position in this iteration */
        cc->s_page = s_page;
        cc->index = index;
-       cc->nr_migrated = nr_migrated;
 
        return ret;
 }
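
Note the changed contract: migrate_zspage() no longer starts its own counter at zero, so cc->nr_migrated accumulates across successive calls for the same class, and the caller must initialize it exactly once, which __zs_compact() now does. Roughly, as an illustrative fragment rather than the kernel's actual control flow (src_page/dst_page assumed to be in scope):

	struct zs_compact_control cc;

	cc.index = 0;
	cc.s_page = src_page;			/* first source zspage page    */
	cc.d_page = dst_page;			/* destination zspage          */
	cc.nr_migrated = 0;			/* caller zeroes exactly once  */
	migrate_zspage(pool, class, &cc);	/* each call accumulates...    */
	migrate_zspage(pool, class, &cc);	/* ...across repeated calls    */
	pool->stats.num_migrated += cc.nr_migrated; /* folded in at the end */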
        return obj_wasted * get_pages_per_zspage(class->size);
 }
 
-static unsigned long __zs_compact(struct zs_pool *pool,
-                               struct size_class *class)
+static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 {
        struct zs_compact_control cc;
        struct page *src_page;
        struct page *dst_page = NULL;
-       unsigned long nr_total_migrated = 0;
 
+       cc.nr_migrated = 0;
        spin_lock(&class->lock);
        while ((src_page = isolate_source_page(class))) {
 
                                break;
 
                        putback_zspage(pool, class, dst_page);
-                       nr_total_migrated += cc.nr_migrated;
                }
 
                /* Stop if we couldn't find slot */
                putback_zspage(pool, class, dst_page);
                putback_zspage(pool, class, src_page);
                spin_unlock(&class->lock);
-               nr_total_migrated += cc.nr_migrated;
                cond_resched();
                spin_lock(&class->lock);
        }
        if (src_page)
                putback_zspage(pool, class, src_page);
 
-       spin_unlock(&class->lock);
+       pool->stats.num_migrated += cc.nr_migrated;
 
-       return nr_total_migrated;
+       spin_unlock(&class->lock);
 }
 
 unsigned long zs_compact(struct zs_pool *pool)
 {
        int i;
-       unsigned long nr_migrated = 0;
        struct size_class *class;
 
        for (i = zs_size_classes - 1; i >= 0; i--) {
                        continue;
                if (class->index != i)
                        continue;
-               nr_migrated += __zs_compact(pool, class);
+               __zs_compact(pool, class);
        }
 
-       return nr_migrated;
+       return pool->stats.num_migrated;
 }
 EXPORT_SYMBOL_GPL(zs_compact);
 
+void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
+{
+       memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
+}
+EXPORT_SYMBOL_GPL(zs_pool_stats);
+
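+
One behavioral consequence worth calling out: zs_compact() now returns the pool's cumulative migration count rather than the number of objects migrated by this particular invocation. A caller that wants a per-call figure would have to snapshot the stats around the call; a minimal sketch, where compact_delta() is a hypothetical helper and not part of this patch:

	static unsigned long compact_delta(struct zs_pool *pool)
	{
		struct zs_pool_stats before, after;

		zs_pool_stats(pool, &before);	/* counter before compaction */
		zs_compact(pool);
		zs_pool_stats(pool, &after);	/* counter after compaction  */

		return after.num_migrated - before.num_migrated;
	}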
 /**
  * zs_create_pool - Creates an allocation pool to work from.
  * @flags: allocation flags used to allocate pool metadata