module_param_named(max_pool_percent,
                        zswap_max_pool_percent, uint, 0644);
 
+/*
+ * The zbud pool is shared by all swap types.
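+ * It is created once at module init and destroyed only on the init
+ * error path; invalidating a swap type frees that type's zswap_tree
+ * but leaves the shared pool intact.
+ */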
+static struct zbud_pool *zswap_pool;
+
 /*********************************
 * compression functions
 **********************************/
 struct zswap_tree {
        struct rb_root rbroot;
        spinlock_t lock;
-       struct zbud_pool *pool;
 };
 
 static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
  * Carries out the common pattern of freeing an entry's zbud allocation,
  * freeing the entry itself, and decrementing the number of stored pages.
  */
-static void zswap_free_entry(struct zswap_tree *tree,
-                       struct zswap_entry *entry)
+static void zswap_free_entry(struct zswap_entry *entry)
 {
-       zbud_free(tree->pool, entry->handle);
+       zbud_free(zswap_pool, entry->handle);
        zswap_entry_cache_free(entry);
        atomic_dec(&zswap_stored_pages);
-       zswap_pool_pages = zbud_get_pool_size(tree->pool);
+       zswap_pool_pages = zbud_get_pool_size(zswap_pool);
 }
 
 /* caller must hold the tree lock */
        BUG_ON(refcount < 0);
        if (refcount == 0) {
                zswap_rb_erase(&tree->rbroot, entry);
-               zswap_free_entry(tree, entry);
+               zswap_free_entry(entry);
        }
 }
 
        zbud_unmap(pool, handle);
        tree = zswap_trees[swp_type(swpentry)];
        offset = swp_offset(swpentry);
-       BUG_ON(pool != tree->pool);
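+       /* all zswap entries live in the single global zswap_pool */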
 
        /* find and ref zswap entry */
        spin_lock(&tree->lock);
        case ZSWAP_SWAPCACHE_NEW: /* page is locked */
                /* decompress */
                dlen = PAGE_SIZE;
-               src = (u8 *)zbud_map(tree->pool, entry->handle) +
+               src = (u8 *)zbud_map(zswap_pool, entry->handle) +
                        sizeof(struct zswap_header);
                dst = kmap_atomic(page);
                ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
                                entry->length, dst, &dlen);
                kunmap_atomic(dst);
-               zbud_unmap(tree->pool, entry->handle);
+               zbud_unmap(zswap_pool, entry->handle);
                BUG_ON(ret);
                BUG_ON(dlen != PAGE_SIZE);
 
        /* reclaim space if needed */
        if (zswap_is_full()) {
                zswap_pool_limit_hit++;
-               if (zbud_reclaim_page(tree->pool, 8)) {
+               if (zbud_reclaim_page(zswap_pool, 8)) {
                        zswap_reject_reclaim_fail++;
                        ret = -ENOMEM;
                        goto reject;
 
        /* store */
        len = dlen + sizeof(struct zswap_header);
-       ret = zbud_alloc(tree->pool, len, __GFP_NORETRY | __GFP_NOWARN,
+       ret = zbud_alloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
                &handle);
        if (ret == -ENOSPC) {
                zswap_reject_compress_poor++;
                goto freepage;
        }
        if (ret) {
                zswap_reject_alloc_fail++;
                goto freepage;
        }
-       zhdr = zbud_map(tree->pool, handle);
+       zhdr = zbud_map(zswap_pool, handle);
        zhdr->swpentry = swp_entry(type, offset);
        buf = (u8 *)(zhdr + 1);
        memcpy(buf, dst, dlen);
-       zbud_unmap(tree->pool, handle);
+       zbud_unmap(zswap_pool, handle);
        put_cpu_var(zswap_dstmem);
 
        /* populate entry */
 
        /* update stats */
        atomic_inc(&zswap_stored_pages);
-       zswap_pool_pages = zbud_get_pool_size(tree->pool);
+       zswap_pool_pages = zbud_get_pool_size(zswap_pool);
 
        return 0;
 
 
        /* decompress */
        dlen = PAGE_SIZE;
-       src = (u8 *)zbud_map(tree->pool, entry->handle) +
+       src = (u8 *)zbud_map(zswap_pool, entry->handle) +
                        sizeof(struct zswap_header);
        dst = kmap_atomic(page);
        ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
                dst, &dlen);
        kunmap_atomic(dst);
-       zbud_unmap(tree->pool, entry->handle);
+       zbud_unmap(zswap_pool, entry->handle);
        BUG_ON(ret);
 
        spin_lock(&tree->lock);
        /* walk the tree and free everything */
        spin_lock(&tree->lock);
        rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
-               zswap_free_entry(tree, entry);
+               zswap_free_entry(entry);
        tree->rbroot = RB_ROOT;
        spin_unlock(&tree->lock);
-
-       zbud_destroy_pool(tree->pool);
        kfree(tree);
        zswap_trees[type] = NULL;
 }
        struct zswap_tree *tree;
 
        tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
-       if (!tree)
-               goto err;
-       tree->pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
-       if (!tree->pool)
-               goto freetree;
+       if (!tree) {
+               pr_err("alloc failed, zswap disabled for swap type %d\n", type);
+               return;
+       }
+
        tree->rbroot = RB_ROOT;
        spin_lock_init(&tree->lock);
        zswap_trees[type] = tree;
-       return;
-
-freetree:
-       kfree(tree);
-err:
-       pr_err("alloc failed, zswap disabled for swap type %d\n", type);
 }
 
 static struct frontswap_ops zswap_frontswap_ops = {
                return 0;
 
        pr_info("loading zswap\n");
+
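+       /* the shared zbud pool must exist before any frontswap ops can use it */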
+       zswap_pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
+       if (!zswap_pool) {
+               pr_err("zbud pool creation failed\n");
+               goto error;
+       }
+
        if (zswap_entry_cache_create()) {
                pr_err("entry cache creation failed\n");
-               goto error;
+               goto cachefail;
        }
        if (zswap_comp_init()) {
                pr_err("compressor initialization failed\n");
                pr_err("per-cpu initialization failed\n");
                goto pcpufail;
        }
+
        frontswap_register_ops(&zswap_frontswap_ops);
        if (zswap_debugfs_init())
                pr_warn("debugfs initialization failed\n");
        zswap_comp_exit();
 compfail:
        zswap_entry_cache_destory();
+cachefail:
+       zbud_destroy_pool(zswap_pool);
 error:
        return -ENOMEM;
 }