if NVM
 
-config NVM_DEBUG
-       bool "Open-Channel SSD debugging support"
-       default n
-       ---help---
-       Exposes a debug management interface to create/remove targets at:
+config NVM_PBLK
+       tristate "Physical Block Device Open-Channel SSD target"
+       help
+         Allows an open-channel SSD to be exposed as a block device to the
+         host. The target assumes the device exposes raw flash and must be
+         explicitly managed by the host.
 
-         /sys/module/lnvm/parameters/configure_debug
+         Please note the disk format is considered EXPERIMENTAL for now.
 
-       It is required to create/remove targets without IOCTLs.
+if NVM_PBLK
 
-config NVM_PBLK
-       tristate "Physical Block Device Open-Channel SSD target"
-       ---help---
-       Allows an open-channel SSD to be exposed as a block device to the
-       host. The target assumes the device exposes raw flash and must be
-       explicitly managed by the host.
+config NVM_PBLK_DEBUG
+       bool "PBlk Debug Support"
+       default n
+       help
+         Enables debug support for pblk. This includes extra checks, more
+         vocal error messages, and extra tracking fields in the pblk sysfs
+         entries.
 
-       Please note the disk format is considered EXPERIMENTAL for now.
+endif # NVM_PBLK
 
 endif # NVM
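With this split, the debug machinery is gated by its own symbol instead of the
subsystem-wide NVM_DEBUG. A minimal .config fragment for building pblk with the
debug counters enabled might look like this (illustrative only; NVM itself keeps
its usual dependencies):

	CONFIG_NVM=y
	CONFIG_NVM_PBLK=m
	CONFIG_NVM_PBLK_DEBUG=y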
 
 
        atomic64_add(nr_entries, &pblk->user_wa);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(nr_entries, &pblk->inflight_writes);
        atomic_long_add(nr_entries, &pblk->req_writes);
 #endif
 
        atomic64_add(valid_entries, &pblk->gc_wa);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(valid_entries, &pblk->inflight_writes);
        atomic_long_add(valid_entries, &pblk->recov_gc_writes);
 #endif
 
        u64 paddr;
        int line_id;
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
 {
        atomic_long_inc(&pblk->write_failed);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
 #endif
 }
        default:
                pr_err("pblk: unknown read error:%d\n", rqd->error);
        }
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
 #endif
 }
 
        atomic_inc(&pblk->inflight_io);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
 #endif
 
        atomic_inc(&pblk->inflight_io);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
 #endif
        struct list_head *move_list;
        int i;
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
                                "pblk: corrupt closed line %d\n", line->id);
 #endif
         * Only send one inflight I/O per LUN. Since we map at a page
         * granularity, all ppas in the I/O will map to the same LUN
         */
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        int i;
 
        for (i = 1; i < nr_ppas; i++)
        struct pblk_lun *rlun;
        int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        int i;
 
        for (i = 1; i < nr_ppas; i++)
 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
 {
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
        struct ppa_addr ppa_l2p, ppa_gc;
        int ret = 1;
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa_new));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
 {
        struct ppa_addr ppa_l2p;
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa_mapped));
 #endif
        /* Invalidate and discard padded entries */
        if (lba == ADDR_EMPTY) {
                atomic64_inc(&pblk->pad_wa);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                atomic_long_inc(&pblk->padded_wb);
 #endif
                if (!pblk_ppa_empty(ppa_mapped))
                goto out;
        }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
 #endif
 
 
                io_schedule();
        }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        pr_info("pblk: flushing gc pipeline, %d lines left\n",
                atomic_read(&gc->pipeline_gc));
 #endif
 
        return entry_size * pblk->rl.nr_secs;
 }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
 static u32 pblk_l2p_crc(struct pblk *pblk)
 {
        size_t map_size;
                }
        }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
 #endif
 
        pblk_gc_exit(pblk, graceful);
        pblk_tear_down(pblk, graceful);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
 #endif
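Taken together, the two CRC prints bracket a pblk lifetime: the value logged at
exit can be compared against the value logged at the next init to check that
recovery rebuilt the same L2P table, at the cost of one CRC pass over the map
on each side.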
 
        spin_lock_init(&pblk->trans_lock);
        spin_lock_init(&pblk->lock);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_set(&pblk->inflight_writes, 0);
        atomic_long_set(&pblk->padded_writes, 0);
        atomic_long_set(&pblk->padded_wb, 0);
 
        } while (iter > 0);
        up_write(&pblk_rb_lock);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_set(&rb->inflight_flush_point, 0);
 #endif
 
 
        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
 #endif
 
        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
 #endif
                return 0;
        }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_inc(&rb->inflight_flush_point);
 #endif
 
                atomic64_add(pad, &pblk->pad_wa);
        }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(pad, &pblk->padded_writes);
 #endif
 
        int ret = 1;
 
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Caller must ensure that the access will not cause an overflow */
        BUG_ON(pos >= rb->nr_entries);
 #endif
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_read(&rb->inflight_flush_point),
 #else
                        0,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_read(&rb->inflight_flush_point),
 #else
                        0,
 
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter, bool advanced_bio)
 {
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(lba);
                        advanced_bio = true;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
 #endif
                } else {
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
 #endif
 }
                        continue;
 
                if (lba != blba + i) {
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                        struct ppa_addr *p;
 
                        p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr;
                meta_lba = le64_to_cpu(meta_lba_list[j].lba);
 
                if (lba != meta_lba) {
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                        struct ppa_addr *p;
                        int nr_ppas = rqd->nr_ppas;
 
 
 static void pblk_end_user_read(struct bio *bio)
 {
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
 #endif
        bio_endio(bio);
        if (put_line)
                pblk_read_put_rqd_kref(pblk, rqd);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
 #endif
 
        if (rqd->error) {
                atomic_long_inc(&pblk->read_failed);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                pblk_print_failed_rqd(pblk, rqd, rqd->error);
 #endif
        }
 
        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
 #endif
 
                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(lba);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                atomic_long_inc(&pblk->cache_reads);
 #endif
        } else {
                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
        }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
 #endif
 
        rqd->ppa_addr = ppa_l2p;
        valid_secs = 1;
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
 #endif
 
 
        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
 #endif
        }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
 
        return sz;
 }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
 static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page)
 {
        return snprintf(page, PAGE_SIZE,
        .mode = 0644,
 };
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
 static struct attribute sys_stats_debug_attr = {
        .name = "stats",
        .mode = 0444,
        &sys_write_amp_mileage,
        &sys_write_amp_trip,
        &sys_padding_dist,
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        &sys_stats_debug_attr,
 #endif
        NULL,
                return pblk_sysfs_get_write_amp_trip(pblk, buf);
        else if (strcmp(attr->name, "padding_dist") == 0)
                return pblk_sysfs_get_padding_dist(pblk, buf);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        else if (strcmp(attr->name, "stats") == 0)
                return pblk_sysfs_stats_debug(pblk, buf);
 #endif
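With CONFIG_NVM_PBLK_DEBUG set, these extra counters surface through the
read-only "stats" attribute next to the existing pblk sysfs entries; on a
mainline tree that is /sys/block/<dev>/pblk/stats (path given for orientation,
check your tree).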
 
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&w_ctx->flags, flags);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_dec(&rwb->inflight_flush_point);
 #endif
                }
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
 #endif
 
        unsigned long flags;
        unsigned long pos;
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
 #endif
 
        list_add_tail(&r_ctx->list, &pblk->resubmit_list);
        spin_unlock(&pblk->resubmit_lock);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
 #endif
 }
                pblk_end_w_fail(pblk, rqd);
                return;
        }
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        else
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
 #endif
 
        secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        if ((!secs_to_sync && secs_to_flush)
                        || (secs_to_sync < 0)
                        || (secs_to_sync > secs_avail && !secs_to_flush)) {
        if (pblk_submit_io_set(pblk, rqd))
                goto fail_free_bio;
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(secs_to_sync, &pblk->sub_writes);
 #endif
 
 
        spinlock_t w_lock;              /* Write lock */
        spinlock_t s_lock;              /* Sync lock */
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_t inflight_flush_point;  /* Not served REQ_FLUSH | REQ_FUA */
 #endif
 };
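The rename is mechanical, but the pattern it touches is worth spelling out:
every debug field and update above compiles away entirely when the option is
off, so release builds carry neither the struct members nor the atomic
operations. A standalone userspace sketch of the same guard pattern, with
hypothetical names (NVM_PBLK_DEBUG stands in for the kernel's
CONFIG_NVM_PBLK_DEBUG):

	#include <stdatomic.h>
	#include <stdio.h>

	#define NVM_PBLK_DEBUG 1	/* comment out: fields and updates vanish */

	struct wb_stats {
		atomic_long user_wa;		/* always present */
	#ifdef NVM_PBLK_DEBUG
		atomic_long inflight_writes;	/* debug-only counters */
		atomic_long req_writes;
	#endif
	};

	static void account_write(struct wb_stats *st, long nr_entries)
	{
		atomic_fetch_add(&st->user_wa, nr_entries);
	#ifdef NVM_PBLK_DEBUG
		atomic_fetch_add(&st->inflight_writes, nr_entries);
		atomic_fetch_add(&st->req_writes, nr_entries);
	#endif
	}

	int main(void)
	{
		struct wb_stats st = { 0 };

		account_write(&st, 4);
	#ifdef NVM_PBLK_DEBUG
		printf("req_writes: %ld\n", atomic_load(&st.req_writes));
	#endif
		return 0;
	}

pblk applies exactly this guard around its counters, which is why the rename
has to hit every #ifdef site at once, as the hunks above and below do.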
        u64 nr_flush_rst;               /* Flushes reset value for pad dist.*/
        atomic64_t nr_flush;            /* Number of flush/fua I/O */
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Non-persistent debug counters, 4kb sector I/Os */
        atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
        atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
        return !(nr_secs % pblk->min_write_pgs);
 }
 
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
 static inline void print_ppa(struct nvm_geo *geo, struct ppa_addr *p,
                             char *msg, int error)
 {