return 0;
 }
 
+/*
+ * Fast path for requests that fit in a single bio_vec: map the segment
+ * directly as one SGL data block descriptor, avoiding the per-request
+ * PRP/SGL pool allocation.
+ *
+ * Returns BLK_STS_RESOURCE if the DMA mapping fails, BLK_STS_OK on success.
+ */
+static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
+               struct request *req, struct nvme_rw_command *cmnd,
+               struct bio_vec *bv)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+       iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+       if (dma_mapping_error(dev->dev, iod->first_dma))
+               return BLK_STS_RESOURCE;
+       /* Remember the mapped length so nvme_unmap_data can tell this
+        * fast path apart and unmap without walking an sg list. */
+       iod->dma_len = bv->bv_len;
+
+       /*
+        * Set the PSDT field to indicate the data pointer carries an SGL;
+        * without this the controller would interpret addr/length below
+        * as PRP entries.
+        */
+       cmnd->flags = NVME_CMD_SGL_METABUF;
+       cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
+       cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
+       cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
+       return BLK_STS_OK;
+}
+
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                struct nvme_command *cmnd)
 {
                        if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
                                return nvme_setup_prp_simple(dev, req,
                                                             &cmnd->rw, &bv);
+
+                       if (iod->nvmeq->qid &&
+                           dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+                               return nvme_setup_sgl_simple(dev, req,
+                                                            &cmnd->rw, &bv);
                }
        }