/* Slab cache for lock-tree entries */
 struct kmem_cache *ubi_ltree_slab;
 
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
 /* "Show" method for files in '/<sysfs>/class/ubi/' */
 static ssize_t ubi_version_show(struct class *class, char *buf)
 {
        if (!ubi_ltree_slab)
                goto out_version;
 
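+       /* Create the wear-leveling entry cache, shared by all UBI devices */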
+       ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+                                               sizeof(struct ubi_wl_entry),
+                                               0, 0, NULL);
+       if (!ubi_wl_entry_slab)
+               goto out_ltree;
+
        /* Attach MTD devices */
        for (i = 0; i < mtd_devs; i++) {
                struct mtd_dev_param *p = &mtd_dev_param[i];
 out_detach:
        for (k = 0; k < i; k++)
                detach_mtd_dev(ubi_devices[k]);
+       kmem_cache_destroy(ubi_wl_entry_slab);
+out_ltree:
        kmem_cache_destroy(ubi_ltree_slab);
 out_version:
        class_remove_file(ubi_class, &ubi_version);
 
        for (i = 0; i < n; i++)
                detach_mtd_dev(ubi_devices[i]);
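+       /* All devices are detached, so the shared slab caches can be freed */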
+       kmem_cache_destroy(ubi_wl_entry_slab);
        kmem_cache_destroy(ubi_ltree_slab);
        class_remove_file(ubi_class, &ubi_version);
        class_destroy(ubi_class);
 
  */
 #define WL_MAX_FAILURES 32
 
-/**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
-       struct rb_node rb;
-       int ec;
-       int pnum;
-};
-
 /**
  * struct ubi_wl_prot_entry - PEB protection entry.
  * @rb_pnum: link in the @wl->prot.pnum RB-tree
 #define paranoid_check_in_wl_tree(e, root)
 #endif
 
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
                dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
                err = schedule_erase(ubi, e2, 0);
                if (err) {
-                       kmem_cache_free(wl_entries_slab, e2);
+                       kmem_cache_free(ubi_wl_entry_slab, e2);
                        ubi_ro_mode(ubi);
                }
        }
 
        err = schedule_erase(ubi, e1, 0);
        if (err) {
-               kmem_cache_free(wl_entries_slab, e1);
+               kmem_cache_free(ubi_wl_entry_slab, e1);
                ubi_ro_mode(ubi);
        }
 
                dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
                err = schedule_erase(ubi, e1, 0);
                if (err) {
-                       kmem_cache_free(wl_entries_slab, e1);
+                       kmem_cache_free(ubi_wl_entry_slab, e1);
                        ubi_ro_mode(ubi);
                }
        }
 
        err = schedule_erase(ubi, e2, 0);
        if (err) {
-               kmem_cache_free(wl_entries_slab, e2);
+               kmem_cache_free(ubi_wl_entry_slab, e2);
                ubi_ro_mode(ubi);
        }
 
        if (cancel) {
                dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
                kfree(wl_wrk);
-               kmem_cache_free(wl_entries_slab, e);
+               kmem_cache_free(ubi_wl_entry_slab, e);
                return 0;
        }
 
 
        ubi_err("failed to erase PEB %d, error %d", pnum, err);
        kfree(wl_wrk);
-       kmem_cache_free(wl_entries_slab, e);
+       kmem_cache_free(ubi_wl_entry_slab, e);
 
        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
            err == -EBUSY) {
                                        rb->rb_right = NULL;
                        }
 
-                       kmem_cache_free(wl_entries_slab, e);
+                       kmem_cache_free(ubi_wl_entry_slab, e);
                }
        }
 }
                return err;
        }
 
-       if (ubi_devices_cnt == 0) {
-               wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
-                                                   sizeof(struct ubi_wl_entry),
-                                                   0, 0, NULL);
-               if (!wl_entries_slab)
-                       return -ENOMEM;
-       }
-
        err = -ENOMEM;
        ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
        if (!ubi->lookuptbl)
        list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
                cond_resched();
 
-               e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+               e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;
 
                e->ec = seb->ec;
                ubi->lookuptbl[e->pnum] = e;
                if (schedule_erase(ubi, e, 0)) {
-                       kmem_cache_free(wl_entries_slab, e);
+                       kmem_cache_free(ubi_wl_entry_slab, e);
                        goto out_free;
                }
        }
        list_for_each_entry(seb, &si->free, u.list) {
                cond_resched();
 
-               e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+               e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;
 
        list_for_each_entry(seb, &si->corr, u.list) {
                cond_resched();
 
-               e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+               e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e)
                        goto out_free;
 
                e->ec = seb->ec;
                ubi->lookuptbl[e->pnum] = e;
                if (schedule_erase(ubi, e, 0)) {
-                       kmem_cache_free(wl_entries_slab, e);
+                       kmem_cache_free(ubi_wl_entry_slab, e);
                        goto out_free;
                }
        }
                ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
                        cond_resched();
 
-                       e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+                       e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                        if (!e)
                                goto out_free;
 
        tree_destroy(&ubi->free);
        tree_destroy(&ubi->scrub);
        kfree(ubi->lookuptbl);
-       if (ubi_devices_cnt == 0)
-               kmem_cache_destroy(wl_entries_slab);
        return err;
 }
 
                                        rb->rb_right = NULL;
                        }
 
-                       kmem_cache_free(wl_entries_slab, pe->e);
+                       kmem_cache_free(ubi_wl_entry_slab, pe->e);
                        kfree(pe);
                }
        }
        tree_destroy(&ubi->free);
        tree_destroy(&ubi->scrub);
        kfree(ubi->lookuptbl);
-       if (ubi_devices_cnt == 1)
-               kmem_cache_destroy(wl_entries_slab);
 }
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID