{
        struct se_device *dev = cmd->se_dev;
        struct se_dif_v1_tuple *sdt;
-       struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+       struct scatterlist *dsg = cmd->t_data_sg, *psg;
        sector_t sector = cmd->t_task_lba;
        void *daddr, *paddr;
        int i, j, offset = 0;
+       unsigned int block_size = dev->dev_attrib.block_size;
 
-       for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
-               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+       for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
 
-               for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
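+               /*
+                * Walk this protection sg entry one DIF tuple at a time; the
+                * data scatterlist is advanced by hand below, since its
+                * segment boundaries need not line up with block boundaries.
+                */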
+               for (j = 0; j < psg->length;
+                               j += sizeof(struct se_dif_v1_tuple)) {
+                       __u16 crc;
+                       unsigned int avail;
 
-                       if (offset >= psg->length) {
-                               kunmap_atomic(paddr);
-                               psg = sg_next(psg);
-                               paddr = kmap_atomic(sg_page(psg)) + psg->offset;
-                               offset = 0;
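+                       /* The current data sg entry is used up; map the next one. */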
+                       if (offset >= dsg->length) {
+                               offset -= dsg->length;
+                               kunmap_atomic(daddr - dsg->offset);
+                               dsg = sg_next(dsg);
+                               if (!dsg) {
+                                       kunmap_atomic(paddr - psg->offset);
+                                       return;
+                               }
+                               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
                        }
 
-                       sdt = paddr + offset;
-                       sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
-                                               dev->dev_attrib.block_size));
+                       sdt = paddr + j;
+                       avail = min(block_size, dsg->length - offset);
+                       crc = crc_t10dif(daddr + offset, avail);
+                       if (avail < block_size) {
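+                               /*
+                                * The data block straddles an sg boundary:
+                                * finish the guard tag CRC in the next entry.
+                                */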
+                               kunmap_atomic(daddr - dsg->offset);
+                               dsg = sg_next(dsg);
+                               if (!dsg) {
+                                       kunmap_atomic(paddr - psg->offset);
+                                       return;
+                               }
+                               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+                               offset = block_size - avail;
+                               crc = crc_t10dif_update(crc, daddr, offset);
+                       } else {
+                               offset += block_size;
+                       }
+
+                       sdt->guard_tag = cpu_to_be16(crc);
                        if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
                                sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
                        sdt->app_tag = 0;

                        pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
                                 " app_tag: 0x%04x ref_tag: %u\n",
                                 (cmd->data_direction == DMA_TO_DEVICE) ?
                                 "WRITE" : "READ", (unsigned long long)sector,
                                 sdt->guard_tag, sdt->app_tag,
                                 be32_to_cpu(sdt->ref_tag));
 
                        sector++;
-                       offset += sizeof(struct se_dif_v1_tuple);
                }
 
-               kunmap_atomic(paddr);
-               kunmap_atomic(daddr);
+               kunmap_atomic(daddr - dsg->offset);
+               kunmap_atomic(paddr - psg->offset);
        }
 }
 
 static sense_reason_t
 sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
-                 const void *p, sector_t sector, unsigned int ei_lba)
+                 __u16 crc, sector_t sector, unsigned int ei_lba)
 {
-       struct se_device *dev = cmd->se_dev;
-       int block_size = dev->dev_attrib.block_size;
        __be16 csum;
 
        if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
                goto check_ref;
 
-       csum = cpu_to_be16(crc_t10dif(p, block_size));
+       csum = cpu_to_be16(crc);
 
        if (sdt->guard_tag != csum) {
                pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
 {
        struct se_device *dev = cmd->se_dev;
        struct se_dif_v1_tuple *sdt;
-       struct scatterlist *dsg;
+       struct scatterlist *dsg = cmd->t_data_sg;
        sector_t sector = start;
        void *daddr, *paddr;
-       int i, j;
+       int i;
        sense_reason_t rc;
+       int dsg_off = 0;
+       unsigned int block_size = dev->dev_attrib.block_size;
 
-       for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
-               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
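+       /*
+        * Resume at the protection sg entry/offset handed in by the caller
+        * and walk tuples until the requested sector range is covered.
+        */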
+       for (; psg && sector < start + sectors; psg = sg_next(psg)) {
                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
 
-               for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+               for (i = psg_off; i < psg->length &&
+                               sector < start + sectors;
+                               i += sizeof(struct se_dif_v1_tuple)) {
+                       __u16 crc;
+                       unsigned int avail;
 
-                       if (psg_off >= psg->length) {
-                               kunmap_atomic(paddr - psg->offset);
-                               psg = sg_next(psg);
-                               paddr = kmap_atomic(sg_page(psg)) + psg->offset;
-                               psg_off = 0;
+                       if (dsg_off >= dsg->length) {
+                               dsg_off -= dsg->length;
+                               kunmap_atomic(daddr - dsg->offset);
+                               dsg = sg_next(dsg);
+                               if (!dsg) {
+                                       kunmap_atomic(paddr - psg->offset);
+                                       return 0;
+                               }
+                               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
                        }
 
-                       sdt = paddr + psg_off;
+                       sdt = paddr + i;
 
                        pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
                                 " app_tag: 0x%04x ref_tag: %u\n",
                                 (unsigned long long)sector, sdt->guard_tag,
                                 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
 
                        if (sdt->app_tag == cpu_to_be16(0xffff)) {
-                               sector++;
-                               psg_off += sizeof(struct se_dif_v1_tuple);
-                               continue;
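+                               /*
+                                * All-ones app tag: protection info for this
+                                * block is not checked; just account for the
+                                * data block and move on.
+                                */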
+                               dsg_off += block_size;
+                               goto next;
+                       }
+
+                       avail = min(block_size, dsg->length - dsg_off);
+                       crc = crc_t10dif(daddr + dsg_off, avail);
+                       if (avail < block_size) {
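+                               /* Block spills into the next data sg entry;
+                                * continue the CRC there.
+                                */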
+                               kunmap_atomic(daddr - dsg->offset);
+                               dsg = sg_next(dsg);
+                               if (!dsg) {
+                                       kunmap_atomic(paddr - psg->offset);
+                                       return 0;
+                               }
+                               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+                               dsg_off = block_size - avail;
+                               crc = crc_t10dif_update(crc, daddr, dsg_off);
+                       } else {
+                               dsg_off += block_size;
                        }
 
-                       rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
-                                              ei_lba);
+                       rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
                        if (rc) {
-                               kunmap_atomic(paddr - psg->offset);
                                kunmap_atomic(daddr - dsg->offset);
+                               kunmap_atomic(paddr - psg->offset);
                                cmd->bad_sector = sector;
                                return rc;
                        }
-
+next:
                        sector++;
                        ei_lba++;
-                       psg_off += sizeof(struct se_dif_v1_tuple);
                }
 
-               kunmap_atomic(paddr - psg->offset);
+               psg_off = 0;
                kunmap_atomic(daddr - dsg->offset);
+               kunmap_atomic(paddr - psg->offset);
        }
 
        return 0;