spin_unlock(&pblk->trans_lock);
 }
 
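+/*
+ * pblk_alloc_rqd_meta - set up the DMA-mapped metadata buffer for a request.
+ *
+ * A single allocation backs both the per-sector metadata and, for
+ * multi-sector requests, the PPA list placed pblk_dma_meta_size bytes in.
+ * Single-sector requests address their PPA inline in the rqd, so no list
+ * is mapped for them.
+ */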
+int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+       struct nvm_tgt_dev *dev = pblk->dev;
+
+       rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+                                                       &rqd->dma_meta_list);
+       if (!rqd->meta_list)
+               return -ENOMEM;
+
+       if (rqd->nr_ppas == 1)
+               return 0;
+
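+       /* Lay the PPA list out in the same DMA buffer, behind the metadata. */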
+       rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
+       rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
+
+       return 0;
+}
+
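+/*
+ * pblk_free_rqd_meta - release the buffer set up by pblk_alloc_rqd_meta().
+ * Safe on requests whose metadata allocation failed or never happened.
+ */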
+void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+       struct nvm_tgt_dev *dev = pblk->dev;
+
+       if (rqd->meta_list)
+               nvm_dev_dma_free(dev->parent, rqd->meta_list,
+                               rqd->dma_meta_list);
+}
+
 /* Caller must guarantee that the request is a valid type */
 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
 {
 /* Typically used on completion path. Cannot guarantee request consistency */
 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
 {
-       struct nvm_tgt_dev *dev = pblk->dev;
        mempool_t *pool;
 
        switch (type) {
                return;
        }
 
-       if (rqd->meta_list)
-               nvm_dev_dma_free(dev->parent, rqd->meta_list,
-                               rqd->dma_meta_list);
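+       /* Safe even if this request never had its metadata allocated. */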
+       pblk_free_rqd_meta(pblk, rqd);
        mempool_free(rqd, pool);
 }
 
 
        memset(&rqd, 0, sizeof(struct nvm_rq));
 
-       rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                                       &rqd.dma_meta_list);
-       if (!rqd.meta_list)
-               return -ENOMEM;
-
-       rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
-       rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
+       ret = pblk_alloc_rqd_meta(pblk, &rqd);
+       if (ret)
+               return ret;
 
        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
-               goto free_ppa_list;
+               goto clear_rqd;
        }
 
        bio->bi_iter.bi_sector = 0; /* internal bio */
        if (ret) {
                pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
-               goto free_ppa_list;
+               goto clear_rqd;
        }
 
        atomic_dec(&pblk->inflight_io);
                        pblk_log_read_err(pblk, &rqd);
        }
 
-free_ppa_list:
-       nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
-
+clear_rqd:
+       pblk_free_rqd_meta(pblk, &rqd);
        return ret;
 }
 
 
         */
        bio_init_idx = pblk_get_bi_idx(bio);
 
-       rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                                       &rqd->dma_meta_list);
-       if (!rqd->meta_list) {
-               pblk_err(pblk, "not able to allocate ppa list\n");
+       if (pblk_alloc_rqd_meta(pblk, rqd))
                goto fail_rqd_free;
-       }
-
-       if (nr_secs > 1) {
-               rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
-               rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
 
+       if (nr_secs > 1)
                pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
-       } else {
+       else
                pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
-       }
 
        if (bitmap_full(read_bitmap, nr_secs)) {
                atomic_inc(&pblk->inflight_io);
 
        memset(&rqd, 0, sizeof(struct nvm_rq));
 
-       rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                                       &rqd.dma_meta_list);
-       if (!rqd.meta_list)
-               return -ENOMEM;
+       ret = pblk_alloc_rqd_meta(pblk, &rqd);
+       if (ret)
+               return ret;
 
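+       /* pblk_alloc_rqd_meta() has already mapped the PPA list for this rqd. */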
        if (gc_rq->nr_secs > 1) {
-               rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
-               rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
-
                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list,
                                                        gc_rq->paddr_list,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pblk_err(pblk, "could not allocate GC bio (%lu)\n",
-                                                               PTR_ERR(bio));
+                               PTR_ERR(bio));
+               ret = PTR_ERR(bio);
                goto err_free_dma;
        }
 
 #endif
 
 out:
-       nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+       pblk_free_rqd_meta(pblk, &rqd);
        return ret;
 
 err_free_bio:
        bio_put(bio);
 err_free_dma:
-       nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+       pblk_free_rqd_meta(pblk, &rqd);
        return ret;
 }
 
 {
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
-       struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct pblk_pad_rq *pad_rq;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
-       dma_addr_t dma_ppa_list, dma_meta_list;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        u64 w_ptr = line->cur_sec;
        int left_line_ppas, rq_ppas, rq_len;
 
        rq_len = rq_ppas * geo->csecs;
 
-       meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
-       if (!meta_list) {
-               ret = -ENOMEM;
-               goto fail_free_pad;
-       }
-
-       ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
-       dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
-
        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
-               goto fail_free_meta;
+               goto fail_free_pad;
        }
 
        bio->bi_iter.bi_sector = 0; /* internal bio */
 
        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
 
+       ret = pblk_alloc_rqd_meta(pblk, rqd);
+       if (ret)
+               goto fail_free_rqd;
+
        rqd->bio = bio;
        rqd->opcode = NVM_OP_PWRITE;
        rqd->is_seq = 1;
-       rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
-       rqd->ppa_list = ppa_list;
-       rqd->dma_ppa_list = dma_ppa_list;
-       rqd->dma_meta_list = dma_meta_list;
        rqd->end_io = pblk_end_io_recov;
        rqd->private = pad_rq;
 
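+       /* Use the per-sector metadata region of the rqd-owned DMA buffer. */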
+       meta_list = rqd->meta_list;
+
        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;
        if (ret) {
                pblk_err(pblk, "I/O submission failed: %d\n", ret);
                pblk_up_chunk(pblk, rqd->ppa_list[0]);
-               goto fail_free_bio;
+               goto fail_free_rqd;
        }
 
        left_line_ppas -= rq_ppas;
        kfree(pad_rq);
        return ret;
 
-fail_free_bio:
+fail_free_rqd:
+       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        bio_put(bio);
-fail_free_meta:
-       nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
 fail_free_pad:
        kfree(pad_rq);
        vfree(data);
 
 }
 
 static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-                          unsigned int nr_secs,
-                          nvm_end_io_fn(*end_io))
+                          unsigned int nr_secs, nvm_end_io_fn(*end_io))
 {
-       struct nvm_tgt_dev *dev = pblk->dev;
-
        /* Setup write request */
        rqd->opcode = NVM_OP_PWRITE;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;
        rqd->end_io = end_io;
 
-       rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                                       &rqd->dma_meta_list);
-       if (!rqd->meta_list)
-               return -ENOMEM;
-
-       rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
-       rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
-
-       return 0;
+       return pblk_alloc_rqd_meta(pblk, rqd);
 }
 
 static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 
  */
 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
+int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
+void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
 int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
                        struct pblk_c_ctx *c_ctx);