} cont_a64_entry_t;
 
 #define PO_MODE_DIF_INSERT     0
-#define PO_MODE_DIF_REMOVE     BIT_0
-#define PO_MODE_DIF_PASS       BIT_1
-#define PO_MODE_DIF_REPLACE    (BIT_0 + BIT_1)
+#define PO_MODE_DIF_REMOVE     1
+#define PO_MODE_DIF_PASS       2
+#define PO_MODE_DIF_REPLACE    3
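+/* PASS operation with an IP-checksum (DIX) guard handled by the firmware */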
+#define PO_MODE_DIF_TCP_CKSUM  6
 #define PO_ENABLE_DIF_BUNDLING BIT_8
 #define PO_ENABLE_INCR_GUARD_SEED      BIT_3
 #define PO_DISABLE_INCR_REF_TAG        BIT_5
 /* Bit 21 of fw_attributes decides the MCTP capabilities */
 #define IS_MCTP_CAPABLE(ha)    (IS_QLA2031(ha) && \
                                ((ha)->fw_attributes_ext[0] & BIT_0))
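+/*
+ * T10 PI capabilities of ISP83xx parts: uninitialized-sector handling,
+ * IP guard, and protection split detection.  DIF bundling with DIX type 0
+ * is hard-coded off for now.
+ */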
+#define IS_PI_UNINIT_CAPABLE(ha)       (IS_QLA83XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha)      (IS_QLA83XX(ha))
+#define IS_PI_DIFB_DIX0_CAPABLE(ha)    (0)
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE(ha)    (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
+    (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
 
        /* HBA serial number */
        uint8_t         serial0;
 
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);
 
-       /* We only support T10 DIF right now */
-       if (guard != SHOST_DIX_GUARD_CRC) {
-               ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
-                   "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
-               return 0;
-       }
-
        /* We always use DIFF Bundling for best performance */
        *fw_prot_opts = 0;
 
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
-               *fw_prot_opts |= PO_MODE_DIF_PASS;
-               break;
        case SCSI_PROT_WRITE_PASS:
-               *fw_prot_opts |= PO_MODE_DIF_PASS;
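+               /*
+                * An IP guard from the midlayer means the firmware must
+                * handle the checksum; otherwise pass the protection data
+                * through unchanged.
+                */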
+               if (guard & SHOST_DIX_GUARD_IP)
+                       *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
+               else
+                       *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
     unsigned int protcnt)
 {
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-       scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 
        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
-
-       ql_dbg(ql_dbg_io, vha, 0x3009,
-           "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
-           "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
-           pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
-           scsi_get_prot_type(cmd), cmd);
 }
 
 struct qla2_sgx {
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-       scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-
-       uint8_t         *cp;
 
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
-               ql_dbg(ql_dbg_io, vha, 0x300a,
-                   "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
-                   i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
 
-               if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
-                       cp = page_address(sg_page(sg)) + sg->offset;
-                       ql_dbg(ql_dbg_io, vha, 0x300b,
-                           "User data buffer=%p for cmd=%p.\n", cp, cmd);
-               }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
-       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-       uint8_t         *cp;
 
        cmd = GET_CMD_SP(sp);
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
-               if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
-                       ql_dbg(ql_dbg_io, vha, 0x3027,
-                           "%s(): %p, sg_entry %d - "
-                           "addr=0x%x0x%x, len=%d.\n",
-                           __func__, cur_dsd, i,
-                           LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
-               }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 
-               if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
-                       cp = page_address(sg_page(sg)) + sg->offset;
-                       ql_dbg(ql_dbg_io, vha, 0x3028,
-                           "%s(): Protection Data buffer = %p.\n", __func__,
-                           cp);
-               }
                avail_dsds--;
        }
        /* Null termination */
 
        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+       /* HBA error checking enabled */
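+       /*
+        * ISP83xx firmware can also be asked to cope with uninitialized
+        * protection data: BIT_10 covers DIF types 1 and 2, BIT_11 covers
+        * type 3.
+        */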
+       else if (IS_PI_UNINIT_CAPABLE(ha)) {
+               if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
+                   || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+                       SCSI_PROT_DIF_TYPE2))
+                       fw_prot_opts |= BIT_10;
+               else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+                   SCSI_PROT_DIF_TYPE3)
+                       fw_prot_opts |= BIT_11;
+       }
 
        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
 
                "Maximum queue depth to set for each LUN. "
                "Default is 32.");
 
-/* Do not change the value of this after module load */
-int ql2xenabledif = 0;
-module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
+int ql2xenabledif = 2;
+module_param(ql2xenabledif, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xenabledif,
                " Enable T10-CRC-DIF "
                " Default is 0 - No DIF Support. 1 - Enable it"
        scsi_qla_host_t *vha = shost_priv(sdev->host);
        struct req_que *req = vha->req;
 
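+       /*
+        * Protection-capable HBAs want 8-byte aligned data buffers;
+        * 0x7 is the DMA alignment mask applied to the request queue.
+        */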
+       if (IS_T10_PI_CAPABLE(vha->hw))
+               blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+
        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, req->max_q_depth);
        else
 
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                if (ha->fw_attributes & BIT_4) {
-                       int prot = 0;
+                       int prot = 0, guard;
                        base_vha->flags.difdix_supported = 1;
                        ql_dbg(ql_dbg_init, base_vha, 0x00f1,
                            "Registering for DIF/DIX type 1 and 3 protection.\n");
                            | SHOST_DIX_TYPE1_PROTECTION
                            | SHOST_DIX_TYPE2_PROTECTION
                            | SHOST_DIX_TYPE3_PROTECTION);
-                       scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
+
+                       guard = SHOST_DIX_GUARD_CRC;
+
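+                       /*
+                        * Advertise the IP-checksum guard as well when the
+                        * firmware supports it and DIF is enabled at level 2
+                        * or above (or the chip could do DIF-B with DIX0).
+                        */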
+                       if (IS_PI_IPGUARD_CAPABLE(ha) &&
+                           (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+                               guard |= SHOST_DIX_GUARD_IP;
+
+                       scsi_host_set_guard(host, guard);
                } else
                        base_vha->flags.difdix_supported = 0;
        }