lun->vlun.lun_id = i % dev->luns_per_chnl;
                lun->vlun.chnl_id = i / dev->luns_per_chnl;
                lun->vlun.nr_free_blocks = dev->blks_per_lun;
-               lun->vlun.nr_inuse_blocks = 0;
+               lun->vlun.nr_open_blocks = 0;
+               lun->vlun.nr_closed_blocks = 0;
                lun->vlun.nr_bad_blocks = 0;
        }
        return 0;
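
The LUN initialization above derives each LUN's coordinates from its flat index: LUNs are numbered channel-major, so the channel is the quotient and the in-channel LUN the remainder. A minimal sketch of the inverse mapping (the helper name is illustrative, not part of the patch):

	/* Illustrative only: rebuild the flat index from (chnl_id, lun_id).
	 * With luns_per_chnl = 4, index 6 decomposes as channel 1, LUN 2,
	 * and 1 * 4 + 2 recovers 6.
	 */
	static inline int gennvm_flat_lun_idx(int chnl_id, int lun_id,
					      int luns_per_chnl)
	{
		return chnl_id * luns_per_chnl + lun_id;
	}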
                pba = pba - (dev->sec_per_lun * lun_id);
                blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
 
-               if (!blk->type) {
+               if (!blk->state) {
                        /* at this point, we don't know anything about the
                         * block. It's up to the FTL on top to re-establish the
-                        * block state
+                        * block state. The block is assumed to be open.
                         */
                        list_move_tail(&blk->list, &lun->used_list);
-                       blk->type = 1;
+                       blk->state = NVM_BLK_ST_OPEN;
                        lun->vlun.nr_free_blocks--;
-                       lun->vlun.nr_inuse_blocks++;
+                       lun->vlun.nr_open_blocks++;
                }
        }
 
        module_put(THIS_MODULE);
 }
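
One subtlety in the recovery loop above: a block that has never been claimed still carries state 0 (assuming, as elsewhere in gennvm, a zero-allocated blocks array), which is distinct from NVM_BLK_ST_FREE (0x1). The !blk->state test therefore matches only pristine blocks; a block that was handed out and later returned reads as explicitly free:

	/* Illustrative distinction, not from the patch: */
	if (!blk->state)			/* untouched since allocation */
		/* ... */;
	else if (blk->state & NVM_BLK_ST_FREE)	/* allocated and put back */
		/* ... */;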
 
-static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
+static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
                                struct nvm_lun *vlun, unsigned long flags)
 {
        struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
        struct nvm_block *blk = NULL;
        int is_gc = flags & NVM_IOTYPE_GC;
 
-       spin_lock(&vlun->lock);
+       assert_spin_locked(&vlun->lock);
 
        if (list_empty(&lun->free_list)) {
                pr_err_ratelimited("gennvm: lun %u have no free pages available",
 
        blk = list_first_entry(&lun->free_list, struct nvm_block, list);
        list_move_tail(&blk->list, &lun->used_list);
-       blk->type = 1;
+       blk->state = NVM_BLK_ST_OPEN;
 
        lun->vlun.nr_free_blocks--;
-       lun->vlun.nr_inuse_blocks++;
+       lun->vlun.nr_open_blocks++;
 
 out:
+       return blk;
+}
+
+static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
+                               struct nvm_lun *vlun, unsigned long flags)
+{
+       struct nvm_block *blk;
+
+       spin_lock(&vlun->lock);
+       blk = gennvm_get_blk_unlocked(dev, vlun, flags);
        spin_unlock(&vlun->lock);
        return blk;
 }
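
Splitting the allocator this way lets a caller combine block allocation with its own bookkeeping under a single acquisition of vlun->lock; the plain gennvm_get_blk() is now just the locked wrapper. A hedged sketch of the intended composition (my_entry/my_open_list are placeholders; rrpc_get_blk() further down is the in-tree user):

	static struct nvm_block *alloc_and_track(struct nvm_dev *dev,
						 struct nvm_lun *vlun,
						 struct list_head *my_entry,
						 struct list_head *my_open_list,
						 unsigned long flags)
	{
		struct nvm_block *blk;

		spin_lock(&vlun->lock);
		blk = nvm_get_blk_unlocked(dev, vlun, flags);
		if (blk)
			list_add_tail(my_entry, my_open_list);
		spin_unlock(&vlun->lock);

		return blk;
	}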
 
-static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
+static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
 {
        struct nvm_lun *vlun = blk->lun;
        struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
 
-       spin_lock(&vlun->lock);
+       assert_spin_locked(&vlun->lock);
 
-       switch (blk->type) {
-       case 1:
+       if (blk->state & NVM_BLK_ST_OPEN) {
                list_move_tail(&blk->list, &lun->free_list);
+               lun->vlun.nr_open_blocks--;
                lun->vlun.nr_free_blocks++;
-               lun->vlun.nr_inuse_blocks--;
-               blk->type = 0;
-               break;
-       case 2:
+               blk->state = NVM_BLK_ST_FREE;
+       } else if (blk->state & NVM_BLK_ST_CLOSED) {
+               list_move_tail(&blk->list, &lun->free_list);
+               lun->vlun.nr_closed_blocks--;
+               lun->vlun.nr_free_blocks++;
+               blk->state = NVM_BLK_ST_FREE;
+       } else if (blk->state & NVM_BLK_ST_BAD) {
                list_move_tail(&blk->list, &lun->bb_list);
                lun->vlun.nr_bad_blocks++;
-               lun->vlun.nr_inuse_blocks--;
-               break;
-       default:
+               blk->state = NVM_BLK_ST_BAD;
+       } else {
                WARN_ON_ONCE(1);
                pr_err("gennvm: erroneous block type (%lu -> %u)\n",
-                                                       blk->id, blk->type);
+                                                       blk->id, blk->state);
                list_move_tail(&blk->list, &lun->bb_list);
                lun->vlun.nr_bad_blocks++;
-               lun->vlun.nr_inuse_blocks--;
+               blk->state = NVM_BLK_ST_BAD;
        }
+}
 
+static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
+{
+       struct nvm_lun *vlun = blk->lun;
+
+       spin_lock(&vlun->lock);
+       gennvm_put_blk_unlocked(dev, blk);
        spin_unlock(&vlun->lock);
 }
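
Summarizing the put path: both in-use states collapse back to free, and only bad blocks are quarantined.

	/* state on entry        moved to     counters                     state on exit
	 * NVM_BLK_ST_OPEN    -> free_list    nr_open_blocks--, free++     NVM_BLK_ST_FREE
	 * NVM_BLK_ST_CLOSED  -> free_list    nr_closed_blocks--, free++   NVM_BLK_ST_FREE
	 * NVM_BLK_ST_BAD     -> bb_list      nr_bad_blocks++              NVM_BLK_ST_BAD
	 */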
 
        blk = &lun->vlun.blocks[ppa->g.blk];
 
        /* will be moved to bb list on put_blk from target */
-       blk->type = type;
+       blk->state = type;
 }
 
 /* mark block bad. It is expected the target recover from the error. */
        /* look up blocks and mark them as bad */
        if (rqd->nr_pages > 1)
                for (i = 0; i < rqd->nr_pages; i++)
-                       gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
+                       gennvm_blk_set_type(dev, &rqd->ppa_list[i],
+                                               NVM_BLK_ST_BAD);
        else
-               gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
+               gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
 }
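
The single-page/multi-page split above (a lone PPA lives inline in rqd->ppa_addr, multiple PPAs in rqd->ppa_list) recurs throughout lightnvm. If one wanted to factor it out, a hypothetical helper (not in the patch) could read:

	/* Sketch only: apply a state to every PPA of a request. */
	static void gennvm_mark_ppas(struct nvm_dev *dev, struct nvm_rq *rqd,
				     int state)
	{
		int i;

		if (rqd->nr_pages > 1)
			for (i = 0; i < rqd->nr_pages; i++)
				gennvm_blk_set_type(dev, &rqd->ppa_list[i], state);
		else
			gennvm_blk_set_type(dev, &rqd->ppa_addr, state);
	}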
 
 static void gennvm_end_io(struct nvm_rq *rqd)
        gennvm_for_each_lun(gn, lun, i) {
                spin_lock(&lun->vlun.lock);
 
-               pr_info("%s: lun%8u\t%u\t%u\t%u\n",
+               pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
                                dev->name, i,
                                lun->vlun.nr_free_blocks,
-                               lun->vlun.nr_inuse_blocks,
+                               lun->vlun.nr_open_blocks,
+                               lun->vlun.nr_closed_blocks,
                                lun->vlun.nr_bad_blocks);
 
                spin_unlock(&lun->vlun.lock);
 }
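
With the extra column, each line of the LUN report now prints the free, open, closed, and bad counts in that order. Illustrative output only (device name and values invented):

	nvme0n1: lun       0	1016	3	4	1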
 
 static struct nvmm_type gennvm = {
-       .name           = "gennvm",
-       .version        = {0, 1, 0},
+       .name                   = "gennvm",
+       .version                = {0, 1, 0},
+
+       .register_mgr           = gennvm_register,
+       .unregister_mgr         = gennvm_unregister,
 
-       .register_mgr   = gennvm_register,
-       .unregister_mgr = gennvm_unregister,
+       .get_blk_unlocked       = gennvm_get_blk_unlocked,
+       .put_blk_unlocked       = gennvm_put_blk_unlocked,
 
-       .get_blk        = gennvm_get_blk,
-       .put_blk        = gennvm_put_blk,
+       .get_blk                = gennvm_get_blk,
+       .put_blk                = gennvm_put_blk,
 
-       .submit_io      = gennvm_submit_io,
-       .erase_blk      = gennvm_erase_blk,
+       .submit_io              = gennvm_submit_io,
+       .erase_blk              = gennvm_erase_blk,
 
-       .get_lun        = gennvm_get_lun,
-       .lun_info_print = gennvm_lun_info_print,
+       .get_lun                = gennvm_get_lun,
+       .lun_info_print         = gennvm_lun_info_print,
 };
 
 static int __init gennvm_module_init(void)
 
 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
                                                        unsigned long flags)
 {
+       struct nvm_lun *lun = rlun->parent;
        struct nvm_block *blk;
        struct rrpc_block *rblk;
 
-       blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
-       if (!blk)
+       spin_lock(&lun->lock);
+       blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
+       if (!blk) {
+               pr_err("nvm: rrpc: cannot get new block from media manager\n");
+               spin_unlock(&lun->lock);
                return NULL;
+       }
 
        rblk = &rlun->blocks[blk->id];
-       blk->priv = rblk;
+       list_add_tail(&rblk->list, &rlun->open_list);
+       spin_unlock(&lun->lock);
 
+       blk->priv = rblk;
        bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
        rblk->next_page = 0;
        rblk->nr_invalid_pages = 0;
 
 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-       nvm_put_blk(rrpc->dev, rblk->parent);
+       struct rrpc_lun *rlun = rblk->rlun;
+       struct nvm_lun *lun = rlun->parent;
+
+       spin_lock(&lun->lock);
+       nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
+       list_del(&rblk->list);
+       spin_unlock(&lun->lock);
 }
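
Note the symmetry with rrpc_get_blk(): the media-manager put and the list_del() happen under the same lun->lock acquisition, and because rblk->list is a single list_head, list_del() works regardless of whether the block currently sits on the rlun's open_list or closed_list.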
 
 static void rrpc_put_blks(struct rrpc *rrpc)
                lun = rblk->parent->lun;
 
                cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
-               if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
+               if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
+                       struct nvm_block *blk = rblk->parent;
+                       struct rrpc_lun *rlun = rblk->rlun;
+
+                       spin_lock(&lun->lock);
+                       lun->nr_open_blocks--;
+                       lun->nr_closed_blocks++;
+                       blk->state &= ~NVM_BLK_ST_OPEN;
+                       blk->state |= NVM_BLK_ST_CLOSED;
+                       list_move_tail(&rblk->list, &rlun->closed_list);
+                       spin_unlock(&lun->lock);
+
                        rrpc_run_gc(rrpc, rblk);
+               }
        }
 }
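
The close transition flips the state bits and moves the per-LUN accounting in one critical section. As a helper it could read (illustrative sketch; rrpc open-codes this above):

	static void rrpc_block_close(struct nvm_lun *lun, struct rrpc_lun *rlun,
				     struct rrpc_block *rblk)
	{
		struct nvm_block *blk = rblk->parent;

		assert_spin_locked(&lun->lock);

		lun->nr_open_blocks--;
		lun->nr_closed_blocks++;
		blk->state &= ~NVM_BLK_ST_OPEN;
		blk->state |= NVM_BLK_ST_CLOSED;
		list_move_tail(&rblk->list, &rlun->closed_list);
	}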
 
                rlun->rrpc = rrpc;
                rlun->parent = lun;
                INIT_LIST_HEAD(&rlun->prio_list);
+               INIT_LIST_HEAD(&rlun->open_list);
+               INIT_LIST_HEAD(&rlun->closed_list);
+
                INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
                spin_lock_init(&rlun->lock);
 
 
        int lun_id;
        int chnl_id;
 
-       unsigned int nr_inuse_blocks;   /* Number of used blocks */
+       /* It is up to the target to mark blocks as closed. If the target does
+        * not do it, all blocks are marked as open, and nr_open_blocks
+        * represents the number of blocks in use.
+        */
+       unsigned int nr_open_blocks;    /* Number of used, writable blocks */
+       unsigned int nr_closed_blocks;  /* Number of used, read-only blocks */
        unsigned int nr_free_blocks;    /* Number of unused blocks */
        unsigned int nr_bad_blocks;     /* Number of bad blocks */
-       struct nvm_block *blocks;
 
        spinlock_t lock;
+
+       struct nvm_block *blocks;
+};
+
+enum {
+       NVM_BLK_ST_FREE =       0x1,    /* Free block */
+       NVM_BLK_ST_OPEN =       0x2,    /* Open block - read-write */
+       NVM_BLK_ST_CLOSED =     0x4,    /* Closed block - read-only */
+       NVM_BLK_ST_BAD =        0x8,    /* Bad block */
 };
 
 struct nvm_block {
        unsigned long id;
 
        void *priv;
-       int type;
+       int state;
 };
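
The states are one-hot bits rather than sequential values, so callers test membership with a mask rather than equality, and compound queries stay cheap. Illustrative helper (not part of the patch):

	static inline bool nvm_blk_is_in_use(struct nvm_block *blk)
	{
		return blk->state & (NVM_BLK_ST_OPEN | NVM_BLK_ST_CLOSED);
	}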
 
 struct nvm_dev {
        nvmm_unregister_fn *unregister_mgr;
 
        /* Block administration callbacks */
+       nvmm_get_blk_fn *get_blk_unlocked;
+       nvmm_put_blk_fn *put_blk_unlocked;
        nvmm_get_blk_fn *get_blk;
        nvmm_put_blk_fn *put_blk;
        nvmm_open_blk_fn *open_blk;
 extern int nvm_register_mgr(struct nvmm_type *);
 extern void nvm_unregister_mgr(struct nvmm_type *);
 
+extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
+                                       struct nvm_lun *, unsigned long);
+extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);
+
 extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
                                                                unsigned long);
 extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);