- * i) plug io further to this physical block. (see bio_prison code).
+ * i) plug further io to this physical block. (see dm_bio_prison code).
  *
  * ii) quiesce any read io to that shared data block.  Obviously
- * including all devices that share this block.  (see deferred_set code)
+ * including all devices that share this block.  (see dm_deferred_set code)
  *
- * iii) copy the data block to a newly allocate block.  This step can be
+ * iii) copy the data block to a newly allocated block.  This step can be
  * missed out if the io covers the block. (schedule_copy).
  * by a key, multiple bios can be in the same cell.  When the cell is
  * subsequently unlocked the bios become available.
  */
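
For orientation, a minimal caller-side sketch of the prison interface as
renamed by this patch; my_td, my_bio and my_block are hypothetical and all
error handling is elided:

        struct dm_bio_prison *prison = dm_bio_prison_create(PRISON_CELLS);
        struct dm_bio_prison_cell *cell;
        struct dm_cell_key key;
        struct bio_list bios;

        build_virtual_key(my_td, my_block, &key);  /* key = {virtual, dev, block} */
        if (dm_bio_detain(prison, &key, my_bio, &cell)) {
                /* Cell was already held: my_bio is now queued as an inmate. */
        } else {
                /* my_bio is the holder: do the work, then free all the bios. */
                bio_list_init(&bios);
                dm_cell_release(cell, &bios);  /* holder and inmates land in bios */
        }
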
-struct bio_prison;
+struct dm_bio_prison;
 
-struct cell_key {
+struct dm_cell_key {
        int virtual;
        dm_thin_id dev;
        dm_block_t block;
 
 struct dm_bio_prison_cell {
        struct hlist_node list;
-       struct bio_prison *prison;
-       struct cell_key key;
+       struct dm_bio_prison *prison;
+       struct dm_cell_key key;
        struct bio *holder;
        struct bio_list bios;
 };
 
-struct bio_prison {
+struct dm_bio_prison {
        spinlock_t lock;
        mempool_t *cell_pool;
 
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
  */
-static struct bio_prison *prison_create(unsigned nr_cells)
+static struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
 {
        unsigned i;
        uint32_t nr_buckets = calc_nr_buckets(nr_cells);
-       size_t len = sizeof(struct bio_prison) +
+       size_t len = sizeof(struct dm_bio_prison) +
                (sizeof(struct hlist_head) * nr_buckets);
-       struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
+       struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);
 
        if (!prison)
                return NULL;
        return prison;
 }
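
The single kmalloc above co-allocates the prison header and its hash-bucket
array. In the elided body the buckets presumably sit directly after the
struct, along these lines (the cells and nr_buckets fields, unlike the
hash_mask used by hash_key() below, do not appear in this excerpt and are
assumed):

        prison->cells = (struct hlist_head *) (prison + 1);
        prison->nr_buckets = nr_buckets;
        prison->hash_mask = nr_buckets - 1;  /* nr_buckets is a power of two */
        for (i = 0; i < nr_buckets; i++)
                INIT_HLIST_HEAD(prison->cells + i);
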
 
-static void prison_destroy(struct bio_prison *prison)
+static void dm_bio_prison_destroy(struct dm_bio_prison *prison)
 {
        mempool_destroy(prison->cell_pool);
        kfree(prison);
 }
 
-static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
+static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
 {
        const unsigned long BIG_PRIME = 4294967291UL;
        uint64_t hash = key->block * BIG_PRIME;
        return (uint32_t) (hash & prison->hash_mask);
 }
 
-static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
+static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
 {
               return (lhs->virtual == rhs->virtual) &&
                       (lhs->dev == rhs->dev) &&
 }
 
 static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
-                                                 struct cell_key *key)
+                                                 struct dm_cell_key *key)
 {
        struct dm_bio_prison_cell *cell;
        struct hlist_node *tmp;
  *
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
-static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-                     struct bio *inmate, struct dm_bio_prison_cell **ref)
+static int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
+                        struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
        int r = 1;
        unsigned long flags;
  */
 static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
-       struct bio_prison *prison = cell->prison;
+       struct dm_bio_prison *prison = cell->prison;
 
        hlist_del(&cell->list);
 
        mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
+static void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
        unsigned long flags;
-       struct bio_prison *prison = cell->prison;
+       struct dm_bio_prison *prison = cell->prison;
 
        spin_lock_irqsave(&prison->lock, flags);
        __cell_release(cell, bios);
        __cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
+static void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        unsigned long flags;
-       struct bio_prison *prison = cell->prison;
+       struct dm_bio_prison *prison = cell->prison;
 
        spin_lock_irqsave(&prison->lock, flags);
        __cell_release_singleton(cell, bio);
 static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
                                     struct bio_list *inmates)
 {
-       struct bio_prison *prison = cell->prison;
+       struct dm_bio_prison *prison = cell->prison;
 
        hlist_del(&cell->list);
        bio_list_merge(inmates, &cell->bios);
        mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
-                                  struct bio_list *inmates)
+static void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell,
+                                     struct bio_list *inmates)
 {
        unsigned long flags;
-       struct bio_prison *prison = cell->prison;
+       struct dm_bio_prison *prison = cell->prison;
 
        spin_lock_irqsave(&prison->lock, flags);
        __cell_release_no_holder(cell, inmates);
        spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct dm_bio_prison_cell *cell)
+static void dm_cell_error(struct dm_bio_prison_cell *cell)
 {
-       struct bio_prison *prison = cell->prison;
+       struct dm_bio_prison *prison = cell->prison;
        struct bio_list bios;
        struct bio *bio;
        unsigned long flags;
  * new mapping could free the old block that the read bios are mapped to.
  */
 
-struct deferred_set;
-struct deferred_entry {
-       struct deferred_set *ds;
+struct dm_deferred_set;
+struct dm_deferred_entry {
+       struct dm_deferred_set *ds;
        unsigned count;
        struct list_head work_items;
 };
 
-struct deferred_set {
+struct dm_deferred_set {
        spinlock_t lock;
        unsigned current_entry;
        unsigned sweeper;
-       struct deferred_entry entries[DEFERRED_SET_SIZE];
+       struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
 };
 
-static void ds_init(struct deferred_set *ds)
+static struct dm_deferred_set *dm_deferred_set_create(void)
 {
        int i;
+       struct dm_deferred_set *ds;
+
+       ds = kmalloc(sizeof(*ds), GFP_KERNEL);
+       if (!ds)
+               return NULL;
 
        spin_lock_init(&ds->lock);
        ds->current_entry = 0;
                ds->entries[i].count = 0;
                INIT_LIST_HEAD(&ds->entries[i].work_items);
        }
+
+       return ds;
 }
 
-static struct deferred_entry *ds_inc(struct deferred_set *ds)
+static void dm_deferred_set_destroy(struct dm_deferred_set *ds)
+{
+       kfree(ds);
+}
+
+static struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
 {
        unsigned long flags;
-       struct deferred_entry *entry;
+       struct dm_deferred_entry *entry;
 
        spin_lock_irqsave(&ds->lock, flags);
        entry = ds->entries + ds->current_entry;
        return (index + 1) % DEFERRED_SET_SIZE;
 }
 
-static void __sweep(struct deferred_set *ds, struct list_head *head)
+static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
 {
        while ((ds->sweeper != ds->current_entry) &&
               !ds->entries[ds->sweeper].count) {
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
 }
 
-static void ds_dec(struct deferred_entry *entry, struct list_head *head)
+static void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
 {
        unsigned long flags;
 
 /*
- * Returns 1 if deferred or 0 if no pending items to delay job.
+ * Returns 1 if the work was deferred, or 0 if there was nothing in
+ * flight and the caller may run the job immediately.
  */
-static int ds_add_work(struct deferred_set *ds, struct list_head *work)
+static int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
 {
        int r = 1;
        unsigned long flags;
        return r;
 }
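
Putting the renamed deferred-set calls together, a hypothetical io life
cycle (my_work and run() are invented for illustration; a return of 0 from
dm_deferred_set_add_work() means nothing is in flight):

        struct dm_deferred_set *ds = dm_deferred_set_create();
        struct dm_deferred_entry *entry;
        LIST_HEAD(drained);  /* work whose blocking io has since completed */

        entry = dm_deferred_entry_inc(ds);       /* io issued: joins the current entry */
        /* ... io in flight ... */
        dm_deferred_entry_dec(entry, &drained);  /* io done: drained work is spliced here */

        /* Meanwhile, queue work until every earlier io has drained. */
        if (!dm_deferred_set_add_work(ds, &my_work.list))
                run(&my_work);  /* nothing pending: safe to run immediately */
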
 
+static int __init dm_bio_prison_init(void)
+{
+       _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+       if (!_cell_cache)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void dm_bio_prison_exit(void)
+{
+       kmem_cache_destroy(_cell_cache);
+       _cell_cache = NULL;
+}
+
 /*----------------------------------------------------------------*/
 
 /*
  * Key building.
  */
 static void build_data_key(struct dm_thin_device *td,
-                          dm_block_t b, struct cell_key *key)
+                          dm_block_t b, struct dm_cell_key *key)
 {
        key->virtual = 0;
        key->dev = dm_thin_dev_id(td);
 }
 
 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
-                             struct cell_key *key)
+                             struct dm_cell_key *key)
 {
        key->virtual = 1;
        key->dev = dm_thin_dev_id(td);
        unsigned low_water_triggered:1; /* A dm event has been sent */
        unsigned no_free_space:1;       /* A -ENOSPC warning has been issued */
 
-       struct bio_prison *prison;
+       struct dm_bio_prison *prison;
        struct dm_kcopyd_client *copier;
 
        struct workqueue_struct *wq;
 
        struct bio_list retry_on_resume_list;
 
-       struct deferred_set shared_read_ds;
-       struct deferred_set all_io_ds;
+       struct dm_deferred_set *shared_read_ds;
+       struct dm_deferred_set *all_io_ds;
 
        struct dm_thin_new_mapping *next_mapping;
        mempool_t *mapping_pool;
 
 struct dm_thin_endio_hook {
        struct thin_c *tc;
-       struct deferred_entry *shared_read_entry;
-       struct deferred_entry *all_io_entry;
+       struct dm_deferred_entry *shared_read_entry;
+       struct dm_deferred_entry *all_io_entry;
        struct dm_thin_new_mapping *overwrite_mapping;
 };
 
        unsigned long flags;
 
        spin_lock_irqsave(&pool->lock, flags);
-       cell_release(cell, &pool->deferred_bios);
+       dm_cell_release(cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
 
        wake_worker(pool);
        bio_list_init(&bios);
 
        spin_lock_irqsave(&pool->lock, flags);
-       cell_release_no_holder(cell, &pool->deferred_bios);
+       dm_cell_release_no_holder(cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);
 
        wake_worker(pool);
 {
        if (m->bio)
                m->bio->bi_end_io = m->saved_bi_end_io;
-       cell_error(m->cell);
+       dm_cell_error(m->cell);
        list_del(&m->list);
        mempool_free(m, m->tc->pool->mapping_pool);
 }
                bio->bi_end_io = m->saved_bi_end_io;
 
        if (m->err) {
-               cell_error(m->cell);
+               dm_cell_error(m->cell);
                goto out;
        }
 
        r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
        if (r) {
                DMERR("dm_thin_insert_block() failed");
-               cell_error(m->cell);
+               dm_cell_error(m->cell);
                goto out;
        }
 
        m->err = 0;
        m->bio = NULL;
 
-       if (!ds_add_work(&pool->shared_read_ds, &m->list))
+       if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
                m->quiesced = 1;
 
        /*
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
                        DMERR("dm_kcopyd_copy() failed");
-                       cell_error(cell);
+                       dm_cell_error(cell);
                }
        }
 }
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
                        DMERR("dm_kcopyd_zero() failed");
-                       cell_error(cell);
+                       dm_cell_error(cell);
                }
        }
 }
        struct bio_list bios;
 
        bio_list_init(&bios);
-       cell_release(cell, &bios);
+       dm_cell_release(cell, &bios);
 
        while ((bio = bio_list_pop(&bios)))
                retry_on_resume(bio);
        unsigned long flags;
        struct pool *pool = tc->pool;
        struct dm_bio_prison_cell *cell, *cell2;
-       struct cell_key key, key2;
+       struct dm_cell_key key, key2;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;
        struct dm_thin_new_mapping *m;
 
        build_virtual_key(tc->td, block, &key);
-       if (bio_detain(tc->pool->prison, &key, bio, &cell))
+       if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
                return;
 
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
                 * on this block.
                 */
                build_data_key(tc->td, lookup_result.block, &key2);
-               if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
-                       cell_release_singleton(cell, bio);
+               if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+                       dm_cell_release_singleton(cell, bio);
                        break;
                }
 
                        m->err = 0;
                        m->bio = bio;
 
-                       if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+                       if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
                                spin_lock_irqsave(&pool->lock, flags);
                                list_add(&m->list, &pool->prepared_discards);
                                spin_unlock_irqrestore(&pool->lock, flags);
                         * a block boundary.  So we submit the discard of a
                         * partial block appropriately.
                         */
-                       cell_release_singleton(cell, bio);
-                       cell_release_singleton(cell2, bio);
+                       dm_cell_release_singleton(cell, bio);
+                       dm_cell_release_singleton(cell2, bio);
                        if ((!lookup_result.shared) && pool->pf.discard_passdown)
                                remap_and_issue(tc, bio, lookup_result.block);
                        else
                /*
                 * It isn't provisioned, just forget it.
                 */
-               cell_release_singleton(cell, bio);
+               dm_cell_release_singleton(cell, bio);
                bio_endio(bio, 0);
                break;
 
        default:
                DMERR("discard: find block unexpectedly returned %d", r);
-               cell_release_singleton(cell, bio);
+               dm_cell_release_singleton(cell, bio);
                bio_io_error(bio);
                break;
        }
 }
 
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
-                         struct cell_key *key,
+                         struct dm_cell_key *key,
                          struct dm_thin_lookup_result *lookup_result,
                          struct dm_bio_prison_cell *cell)
 {
 
        default:
                DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
-               cell_error(cell);
+               dm_cell_error(cell);
                break;
        }
 }
 {
        struct dm_bio_prison_cell *cell;
        struct pool *pool = tc->pool;
-       struct cell_key key;
+       struct dm_cell_key key;
 
        /*
         * If cell is already occupied, then sharing is already in the process
         * of being broken so we have nothing further to do here.
         */
        build_data_key(tc->td, lookup_result->block, &key);
-       if (bio_detain(pool->prison, &key, bio, &cell))
+       if (dm_bio_detain(pool->prison, &key, bio, &cell))
                return;
 
        if (bio_data_dir(bio) == WRITE && bio->bi_size)
        else {
                struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
-               h->shared_read_entry = ds_inc(&pool->shared_read_ds);
+               h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
 
-               cell_release_singleton(cell, bio);
+               dm_cell_release_singleton(cell, bio);
                remap_and_issue(tc, bio, lookup_result->block);
        }
 }
         * Remap empty bios (flushes) immediately, without provisioning.
         */
        if (!bio->bi_size) {
-               cell_release_singleton(cell, bio);
+               dm_cell_release_singleton(cell, bio);
                remap_and_issue(tc, bio, 0);
                return;
        }
         */
        if (bio_data_dir(bio) == READ) {
                zero_fill_bio(bio);
-               cell_release_singleton(cell, bio);
+               dm_cell_release_singleton(cell, bio);
                bio_endio(bio, 0);
                return;
        }
        default:
                DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
                set_pool_mode(tc->pool, PM_READ_ONLY);
-               cell_error(cell);
+               dm_cell_error(cell);
                break;
        }
 }
        int r;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_bio_prison_cell *cell;
-       struct cell_key key;
+       struct dm_cell_key key;
        struct dm_thin_lookup_result lookup_result;
 
        /*
         * being provisioned so we have nothing further to do here.
         */
        build_virtual_key(tc->td, block, &key);
-       if (bio_detain(tc->pool->prison, &key, bio, &cell))
+       if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
                return;
 
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
                 * TODO: this will probably have to change when discard goes
                 * back in.
                 */
-               cell_release_singleton(cell, bio);
+               dm_cell_release_singleton(cell, bio);
 
                if (lookup_result.shared)
                        process_shared_bio(tc, bio, block, &lookup_result);
 
        case -ENODATA:
                if (bio_data_dir(bio) == READ && tc->origin_dev) {
-                       cell_release_singleton(cell, bio);
+                       dm_cell_release_singleton(cell, bio);
                        remap_to_origin_and_issue(tc, bio);
                } else
                        provision_block(tc, bio, block, cell);
 
        default:
                DMERR("dm_thin_find_block() failed, error = %d", r);
-               cell_release_singleton(cell, bio);
+               dm_cell_release_singleton(cell, bio);
                bio_io_error(bio);
                break;
        }
 
        h->tc = tc;
        h->shared_read_entry = NULL;
-       h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
+       h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
        h->overwrite_mapping = NULL;
 
        return h;
        if (dm_pool_metadata_close(pool->pmd) < 0)
                DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
 
-       prison_destroy(pool->prison);
+       dm_bio_prison_destroy(pool->prison);
        dm_kcopyd_client_destroy(pool->copier);
 
        if (pool->wq)
                mempool_free(pool->next_mapping, pool->mapping_pool);
        mempool_destroy(pool->mapping_pool);
        mempool_destroy(pool->endio_hook_pool);
+       dm_deferred_set_destroy(pool->shared_read_ds);
+       dm_deferred_set_destroy(pool->all_io_ds);
        kfree(pool);
 }
 
                pool->sectors_per_block_shift = __ffs(block_size);
        pool->low_water_blocks = 0;
        pool_features_init(&pool->pf);
-       pool->prison = prison_create(PRISON_CELLS);
+       pool->prison = dm_bio_prison_create(PRISON_CELLS);
        if (!pool->prison) {
                *error = "Error creating pool's bio prison";
                err_p = ERR_PTR(-ENOMEM);
        pool->low_water_triggered = 0;
        pool->no_free_space = 0;
        bio_list_init(&pool->retry_on_resume_list);
-       ds_init(&pool->shared_read_ds);
-       ds_init(&pool->all_io_ds);
+
+       pool->shared_read_ds = dm_deferred_set_create();
+       if (!pool->shared_read_ds) {
+               *error = "Error creating pool's shared read deferred set";
+               err_p = ERR_PTR(-ENOMEM);
+               goto bad_shared_read_ds;
+       }
+
+       pool->all_io_ds = dm_deferred_set_create();
+       if (!pool->all_io_ds) {
+               *error = "Error creating pool's all io deferred set";
+               err_p = ERR_PTR(-ENOMEM);
+               goto bad_all_io_ds;
+       }
 
        pool->next_mapping = NULL;
        pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
 bad_endio_hook_pool:
        mempool_destroy(pool->mapping_pool);
 bad_mapping_pool:
+       dm_deferred_set_destroy(pool->all_io_ds);
+bad_all_io_ds:
+       dm_deferred_set_destroy(pool->shared_read_ds);
+bad_shared_read_ds:
        destroy_workqueue(pool->wq);
 bad_wq:
        dm_kcopyd_client_destroy(pool->copier);
 bad_kcopyd_client:
-       prison_destroy(pool->prison);
+       dm_bio_prison_destroy(pool->prison);
 bad_prison:
        kfree(pool);
 bad_pool:
 
        if (h->shared_read_entry) {
                INIT_LIST_HEAD(&work);
-               ds_dec(h->shared_read_entry, &work);
+               dm_deferred_entry_dec(h->shared_read_entry, &work);
 
                spin_lock_irqsave(&pool->lock, flags);
                list_for_each_entry_safe(m, tmp, &work, list) {
 
        if (h->all_io_entry) {
                INIT_LIST_HEAD(&work);
-               ds_dec(h->all_io_entry, &work);
+               dm_deferred_entry_dec(h->all_io_entry, &work);
                spin_lock_irqsave(&pool->lock, flags);
                list_for_each_entry_safe(m, tmp, &work, list)
                        list_add(&m->list, &pool->prepared_discards);
 
        r = -ENOMEM;
 
-       _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
-       if (!_cell_cache)
-               goto bad_cell_cache;
+       if (dm_bio_prison_init())
+               goto bad_cell_cache;
 
        _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
        if (!_new_mapping_cache)
 bad_endio_hook_cache:
        kmem_cache_destroy(_new_mapping_cache);
 bad_new_mapping_cache:
-       kmem_cache_destroy(_cell_cache);
+       dm_bio_prison_exit();
 bad_cell_cache:
        dm_unregister_target(&pool_target);
 bad_pool_target:
        dm_unregister_target(&thin_target);
        dm_unregister_target(&thin_target);
        dm_unregister_target(&pool_target);
 
-       kmem_cache_destroy(_cell_cache);
+       dm_bio_prison_exit();
        kmem_cache_destroy(_new_mapping_cache);
        kmem_cache_destroy(_endio_hook_cache);
 }