                        if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
                                return BLK_STS_NOTSUPP;
                        control |= NVME_RW_PRINFO_PRACT;
+               } else if (req_op(req) == REQ_OP_WRITE) {
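+                       /* swap ref tags to match the physical start LBA */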
+                       t10_pi_prepare(req, ns->pi_type);
                }
 
                switch (ns->pi_type) {
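
On the submission side, a write that carries a separate integrity payload is now handed to the block layer's t10_pi_prepare() in core.c's nvme_setup_rw(), replacing the driver-local nvme_dif_remap(req, nvme_dif_prep) call that disappears from pci.c further down. Conceptually, the helper swaps each tuple's ref tag from the virtual seed to the physical start LBA. A minimal sketch of that swap under stated assumptions (hypothetical function name, a single integrity bvec, and the t10_pi_ref_tag() helper added by the same series; the real block/t10-pi.c code walks every bio and every bvec of the request):

#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/t10-pi.h>

/* Hypothetical, simplified version of the write-side remap. */
static void pi_prepare_sketch(struct request *rq, u8 protection_type)
{
        unsigned int tuple_sz = rq->q->integrity.tuple_size;
        struct bio_integrity_payload *bip = bio_integrity(rq->bio);
        u32 virt, phys, i, nr_tuples;
        void *pmap, *p;

        if (protection_type == T10_PI_TYPE3_PROTECTION)
                return;                         /* Type 3 has no ref tag */
        if (!bip)
                return;

        virt = bip_get_seed(bip) & 0xffffffff;  /* LBA as submitted */
        phys = t10_pi_ref_tag(rq);              /* LBA after partition remap */
        nr_tuples = bip->bip_vec->bv_len / tuple_sz;

        pmap = kmap_atomic(bip->bip_vec->bv_page);
        p = pmap + bip->bip_vec->bv_offset;
        for (i = 0; i < nr_tuples; i++, virt++, phys++) {
                struct t10_pi_tuple *pi = p;

                if (be32_to_cpu(pi->ref_tag) == virt)
                        pi->ref_tag = cpu_to_be32(phys);
                p += tuple_sz;
        }
        kunmap_atomic(pmap);
}

The swap-only-if-it-matches test (ref_tag == virt) leaves tuples alone when the submitter already filled in physical tags, matching the behaviour of the removed nvme_dif_prep() below.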
        return 0;
 }
 
+void nvme_cleanup_cmd(struct request *req)
+{
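+       /*
+        * For a read that completed without error, undo the ref-tag remap
+        * that t10_pi_prepare() applied at submission time.
+        */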
+       if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+           nvme_req(req)->status == 0) {
+               struct nvme_ns *ns = req->rq_disk->private_data;
+
+               t10_pi_complete(req, ns->pi_type,
+                               blk_rq_bytes(req) >> ns->lba_shift);
+       }
+       if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+               kfree(page_address(req->special_vec.bv_page) +
+                     req->special_vec.bv_offset);
+       }
+}
+EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
+
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd)
 {
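
The completion side is the mirror image: nvme_cleanup_cmd() now calls t10_pi_complete() to put the ref tags of a successfully completed read back to the virtual seed before the bio travels back up the stack, replacing nvme_dif_remap(req, nvme_dif_complete). A sketch of the inverse swap, with the same headers and single-bvec simplification as the prepare sketch above (function name again hypothetical):

/* Hypothetical, simplified version of the read-side restore. */
static void pi_complete_sketch(struct request *rq, u8 protection_type,
                               unsigned int intervals)
{
        unsigned int tuple_sz = rq->q->integrity.tuple_size;
        struct bio_integrity_payload *bip = bio_integrity(rq->bio);
        u32 virt, phys, i;
        void *pmap, *p;

        if (protection_type == T10_PI_TYPE3_PROTECTION || !bip)
                return;

        virt = bip_get_seed(bip) & 0xffffffff;
        phys = t10_pi_ref_tag(rq);

        pmap = kmap_atomic(bip->bip_vec->bv_page);
        p = pmap + bip->bip_vec->bv_offset;
        for (i = 0; i < intervals; i++, virt++, phys++) {
                struct t10_pi_tuple *pi = p;

                if (be32_to_cpu(pi->ref_tag) == phys)
                        pi->ref_tag = cpu_to_be32(virt);
                p += tuple_sz;
        }
        kunmap_atomic(pmap);
}

The nvme_req(req)->status == 0 guard in the hunk above is deliberate: the tags of a failed read are left exactly as the controller returned them, so a ref-tag mismatch stays visible to anyone inspecting the error.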
 
        return (sector >> (ns->lba_shift - 9));
 }
 
-static inline void nvme_cleanup_cmd(struct request *req)
-{
-       if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-               kfree(page_address(req->special_vec.bv_page) +
-                     req->special_vec.bv_offset);
-       }
-}
-
 static inline void nvme_end_request(struct request *req, __le16 status,
                union nvme_result result)
 {

 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
+void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 
                mempool_free(iod->sg, dev->iod_mempool);
 }
 
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-       if (be32_to_cpu(pi->ref_tag) == v)
-               pi->ref_tag = cpu_to_be32(p);
-}
-
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-       if (be32_to_cpu(pi->ref_tag) == p)
-               pi->ref_tag = cpu_to_be32(v);
-}
-
-/**
- * nvme_dif_remap - remaps ref tags to bip seed and physical lba
- *
- * The virtual start sector is the one that was originally submitted by the
- * block layer.        Due to partitioning, MD/DM cloning, etc. the actual physical
- * start sector may be different. Remap protection information to match the
- * physical LBA on writes, and back to the original seed on reads.
- *
- * Type 0 and 3 do not have a ref tag, so no remapping required.
- */
-static void nvme_dif_remap(struct request *req,
-                       void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
-       struct nvme_ns *ns = req->rq_disk->private_data;
-       struct bio_integrity_payload *bip;
-       struct t10_pi_tuple *pi;
-       void *p, *pmap;
-       u32 i, nlb, ts, phys, virt;
-
-       if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
-               return;
-
-       bip = bio_integrity(req->bio);
-       if (!bip)
-               return;
-
-       pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
-
-       p = pmap;
-       virt = bip_get_seed(bip);
-       phys = nvme_block_nr(ns, blk_rq_pos(req));
-       nlb = (blk_rq_bytes(req) >> ns->lba_shift);
-       ts = ns->disk->queue->integrity.tuple_size;
-
-       for (i = 0; i < nlb; i++, virt++, phys++) {
-               pi = (struct t10_pi_tuple *)p;
-               dif_swap(phys, virt, pi);
-               p += ts;
-       }
-       kunmap_atomic(pmap);
-}
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_dif_remap(struct request *req,
-                       void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
-}
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-#endif
-
 static void nvme_print_sgl(struct scatterlist *sgl, int nents)
 {
        int i;
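
The CONFIG_BLK_DEV_INTEGRITY block removed above is the code that moved: the per-tuple swap loop now lives in block/t10-pi.c, where any DIF-capable driver can share it, and the remap itself migrates from the PCIe-only map/unmap path into common command setup and cleanup in core.c. The replacement API, with signatures inferred from the calls visible in this patch:

/* Declared in include/linux/t10-pi.h; "intervals" is the number of
 * protection tuples, i.e. logical blocks, covered by the request.
 */
void t10_pi_prepare(struct request *rq, u8 protection_type);
void t10_pi_complete(struct request *rq, u8 protection_type,
                     unsigned int intervals);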
                if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
                        goto out_unmap;
 
-               if (req_op(req) == REQ_OP_WRITE)
-                       nvme_dif_remap(req, nvme_dif_prep);
-
                if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
                        goto out_unmap;
        }
 
        if (iod->nents) {
                dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
-               if (blk_integrity_rq(req)) {
-                       if (req_op(req) == REQ_OP_READ)
-                               nvme_dif_remap(req, nvme_dif_complete);
+               if (blk_integrity_rq(req))
                        dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
-               }
        }
 
        nvme_cleanup_cmd(req);
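
Taken together, the round trip for a request with a separate PI buffer now reads as below. A hypothetical driver-side sketch (names invented; the interval count here assumes 512-byte protection intervals, whereas nvme passes blk_rq_bytes(req) >> ns->lba_shift as seen above):

/* Hypothetical round trip over the block-layer PI helpers. */
static void pi_round_trip_sketch(struct request *rq, u8 pi_type)
{
        if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
                t10_pi_prepare(rq, pi_type);            /* virt -> phys */

        /* ... map buffers, issue the command, wait for completion ... */

        if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_READ)
                t10_pi_complete(rq, pi_type,            /* on success only */
                                blk_rq_sectors(rq));    /* phys -> virt */
}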