le16_to_cpu(map_buff->row_cnt));
        dev_info(&h->pdev->dev, "layout_map_count = %u\n",
                        le16_to_cpu(map_buff->layout_map_count));
+       dev_info(&h->pdev->dev, "flags = %u\n",
+                       le16_to_cpu(map_buff->flags));
+       if (le16_to_cpu(map_buff->flags) & RAID_MAP_FLAG_ENCRYPT_ON)
+               dev_info(&h->pdev->dev, "encryption = ON\n");
+       else
+               dev_info(&h->pdev->dev, "encryption = OFF\n");
+       dev_info(&h->pdev->dev, "dekindex = %u\n",
+                       le16_to_cpu(map_buff->dekindex));
 
        map_cnt = le16_to_cpu(map_buff->layout_map_count);
        for (map = 0; map < map_cnt; map++) {
                cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
 }
 
+/*
+ * Set encryption parameters for the ioaccel2 request
+ */
+static void set_encrypt_ioaccel2(struct ctlr_info *h,
+       struct CommandList *c, struct io_accel2_cmd *cp)
+{
+       struct scsi_cmnd *cmd = c->scsi_cmd;
+       struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+       struct raid_map_data *map = &dev->raid_map;
+       u64 first_block;
+
+       BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+       /* Are we doing encryption on this device */
+       if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+               return;
+       /* Set the data encryption key index. */
+       cp->dekindex = map->dekindex;
+
+       /* Set the encryption enable flag, encoded into direction field. */
+       cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
+
+       /* Set encryption tweak values based on the logical block address.
+        * If the block size is 512, the tweak value is the LBA.
+        * For other block sizes, the tweak is (LBA * block size) / 512.
+        */
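+       /*
+        * Worked example (illustrative, not from the firmware spec): with a
+        * 4096-byte volume_blk_size, LBA 10 yields a tweak of
+        * 10 * 4096 / 512 = 80.
+        */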
+       switch (cmd->cmnd[0]) {
+       /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
+       case WRITE_6:
+       case READ_6:
+               if (map->volume_blk_size == 512) {
+                       cp->tweak_lower =
+                               (((u32) (cmd->cmnd[1] & 0x1F)) << 16) |
+                               (((u32) cmd->cmnd[2]) << 8) |
+                                       cmd->cmnd[3];
+                       cp->tweak_upper = 0;
+               } else {
+                       first_block =
+                               (((u64) (cmd->cmnd[1] & 0x1F)) << 16) |
+                               (((u64) cmd->cmnd[2]) << 8) |
+                                       cmd->cmnd[3];
+                       first_block = (first_block * map->volume_blk_size)/512;
+                       cp->tweak_lower = (u32)first_block;
+                       cp->tweak_upper = (u32)(first_block >> 32);
+               }
+               break;
+       case WRITE_10:
+       case READ_10:
+               if (map->volume_blk_size == 512) {
+                       cp->tweak_lower =
+                               (((u32) cmd->cmnd[2]) << 24) |
+                               (((u32) cmd->cmnd[3]) << 16) |
+                               (((u32) cmd->cmnd[4]) << 8) |
+                                       cmd->cmnd[5];
+                       cp->tweak_upper = 0;
+               } else {
+                       first_block =
+                               (((u64) cmd->cmnd[2]) << 24) |
+                               (((u64) cmd->cmnd[3]) << 16) |
+                               (((u64) cmd->cmnd[4]) << 8) |
+                                       cmd->cmnd[5];
+                       first_block = (first_block * map->volume_blk_size)/512;
+                       cp->tweak_lower = (u32)first_block;
+                       cp->tweak_upper = (u32)(first_block >> 32);
+               }
+               break;
+       /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
+       case WRITE_12:
+       case READ_12:
+               if (map->volume_blk_size == 512) {
+                       cp->tweak_lower =
+                               (((u32) cmd->cmnd[2]) << 24) |
+                               (((u32) cmd->cmnd[3]) << 16) |
+                               (((u32) cmd->cmnd[4]) << 8) |
+                                       cmd->cmnd[5];
+                       cp->tweak_upper = 0;
+               } else {
+                       first_block =
+                               (((u64) cmd->cmnd[2]) << 24) |
+                               (((u64) cmd->cmnd[3]) << 16) |
+                               (((u64) cmd->cmnd[4]) << 8) |
+                                       cmd->cmnd[5];
+                       first_block = (first_block * map->volume_blk_size)/512;
+                       cp->tweak_lower = (u32)first_block;
+                       cp->tweak_upper = (u32)(first_block >> 32);
+               }
+               break;
+       case WRITE_16:
+       case READ_16:
+               if (map->volume_blk_size == 512) {
+                       cp->tweak_lower =
+                               (((u32) cmd->cmnd[6]) << 24) |
+                               (((u32) cmd->cmnd[7]) << 16) |
+                               (((u32) cmd->cmnd[8]) << 8) |
+                                       cmd->cmnd[9];
+                       cp->tweak_upper =
+                               (((u32) cmd->cmnd[2]) << 24) |
+                               (((u32) cmd->cmnd[3]) << 16) |
+                               (((u32) cmd->cmnd[4]) << 8) |
+                                       cmd->cmnd[5];
+               } else {
+                       first_block =
+                               (((u64) cmd->cmnd[2]) << 56) |
+                               (((u64) cmd->cmnd[3]) << 48) |
+                               (((u64) cmd->cmnd[4]) << 40) |
+                               (((u64) cmd->cmnd[5]) << 32) |
+                               (((u64) cmd->cmnd[6]) << 24) |
+                               (((u64) cmd->cmnd[7]) << 16) |
+                               (((u64) cmd->cmnd[8]) << 8) |
+                                       cmd->cmnd[9];
+                       first_block = (first_block * map->volume_blk_size)/512;
+                       cp->tweak_lower = (u32)first_block;
+                       cp->tweak_upper = (u32)(first_block >> 32);
+               }
+               break;
+       default:
+               dev_err(&h->pdev->dev,
+                       "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
+                       __func__);
+               BUG();
+               break;
+       }
+}
+
 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
        u8 *scsi3addr)
 
                switch (cmd->sc_data_direction) {
                case DMA_TO_DEVICE:
-                       cp->direction = IOACCEL2_DIR_DATA_OUT;
+                       cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+                       cp->direction |= IOACCEL2_DIR_DATA_OUT;
                        break;
                case DMA_FROM_DEVICE:
-                       cp->direction = IOACCEL2_DIR_DATA_IN;
+                       cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+                       cp->direction |= IOACCEL2_DIR_DATA_IN;
                        break;
                case DMA_NONE:
-                       cp->direction = IOACCEL2_DIR_NO_DATA;
+                       cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+                       cp->direction |= IOACCEL2_DIR_NO_DATA;
                        break;
                default:
                        dev_err(&h->pdev->dev, "unknown data direction: %d\n",
                                cmd->sc_data_direction);
                        break;
                }
        } else {
-               cp->direction = IOACCEL2_DIR_NO_DATA;
+               cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+               cp->direction |= IOACCEL2_DIR_NO_DATA;
        }
+
+       /* Set encryption parameters, if necessary */
+       set_encrypt_ioaccel2(h, c, cp);
+
        cp->scsi_nexus = ioaccel_handle;
-       cp->Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
+       cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
                                DIRECT_LOOKUP_BIT;
        memcpy(cp->cdb, cdb, sizeof(cp->cdb));
        memset(cp->cciss_lun, 0, sizeof(cp->cciss_lun));
        if (c->cmd_type == CMD_IOACCEL2) {
                struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
                        &h->ioaccel2_cmd_pool[c->cmdindex];
-               *tagupper = cm2->Tag.upper;
-               *taglower = cm2->Tag.lower;
+               /* upper tag not used in ioaccel2 mode */
+               memset(tagupper, 0, sizeof(*tagupper));
+               *taglower = cm2->Tag;
                return;
        }
        *tagupper = c->Header.Tag.upper;
                break;
        }
        cmd_special_free(h, c);
-       dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
-               abort->Header.Tag.upper, abort->Header.Tag.lower);
+       dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
+               __func__, tagupper, taglower);
        return rc;
 }
 
 
 static void __attribute__((unused)) verify_offsets(void)
 {
+#define VERIFY_OFFSET(member, offset) \
+       BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
+
+       VERIFY_OFFSET(structure_size, 0);
+       VERIFY_OFFSET(volume_blk_size, 4);
+       VERIFY_OFFSET(volume_blk_cnt, 8);
+       VERIFY_OFFSET(phys_blk_shift, 16);
+       VERIFY_OFFSET(parity_rotation_shift, 17);
+       VERIFY_OFFSET(strip_size, 18);
+       VERIFY_OFFSET(disk_starting_blk, 20);
+       VERIFY_OFFSET(disk_blk_cnt, 28);
+       VERIFY_OFFSET(data_disks_per_row, 36);
+       VERIFY_OFFSET(metadata_disks_per_row, 38);
+       VERIFY_OFFSET(row_cnt, 40);
+       VERIFY_OFFSET(layout_map_count, 42);
+       VERIFY_OFFSET(flags, 44);
+       VERIFY_OFFSET(dekindex, 46);
+       /* VERIFY_OFFSET(reserved, 48); */
+       VERIFY_OFFSET(data, 64);
+
+#undef VERIFY_OFFSET
+
 #define VERIFY_OFFSET(member, offset) \
        BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
 
 
        u16   row_cnt;                  /* rows in each layout map */
        u16   layout_map_count;         /* layout maps (1 map per mirror/parity
                                         * group) */
-       u8    reserved[20];
+       u16   flags;                    /* Bit 0 set if encryption enabled */
+#define RAID_MAP_FLAG_ENCRYPT_ON  0x01
+       u16   dekindex;                 /* Data encryption key index. */
+       u8    reserved[16];
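+       /* Note: flags and dekindex above take 4 of the previously 20
+        * reserved bytes, so the offset of data[] below is unchanged.
+        */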
        struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
 };
 
  */
 struct io_accel2_cmd {
        u8  IU_type;                    /* IU Type */
-       u8  direction;                  /* Transfer direction, 2 bits */
+       u8  direction;                  /* direction, memtype, and encryption */
+#define IOACCEL2_DIRECTION_MASK                0x03 /* bits 0,1: direction  */
+#define IOACCEL2_DIRECTION_MEMTYPE_MASK        0x04 /* bit 2: memtype source/dest */
+                                            /*     0b=PCIe, 1b=DDR */
+#define IOACCEL2_DIRECTION_ENCRYPT_MASK        0x08 /* bit 3: encryption flag */
+                                            /*     0=off, 1=on */
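+                                            /* e.g. (illustrative): an
+                                             * encrypted write would OR
+                                             * IOACCEL2_DIR_DATA_OUT with
+                                             * IOACCEL2_DIRECTION_ENCRYPT_MASK
+                                             * in this byte */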
        u8  reply_queue;                /* Reply Queue ID */
        u8  reserved1;                  /* Reserved */
        u32 scsi_nexus;                 /* Device Handle */
-       struct vals32 Tag;              /* cciss tag */
+       u32 Tag;                        /* cciss tag, lower 4 bytes only */
+       u32 tweak_lower;                /* Encryption tweak, lower 4 bytes */
        u8  cdb[16];                    /* SCSI Command Descriptor Block */
        u8  cciss_lun[8];               /* 8 byte SCSI address */
        u32 data_len;                   /* Total bytes to transfer */
 #define IOACCEL2_PRIORITY_MASK 0x78
 #define IOACCEL2_ATTR_MASK 0x07
        u8  sg_count;                   /* Number of sg elements */
-       u8  reserved3[2];               /* Reserved */
+       u16 dekindex;                   /* Data encryption key index */
        u64 err_ptr;                    /* Error Pointer */
        u32 err_len;                    /* Error Length*/
-       u8 reserved4[4];                /* Reserved */
+       u32 tweak_upper;                /* Encryption tweak, upper 4 bytes */
        struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
        struct io_accel2_scsi_response error_data;
        u8 pad[IOACCEL2_PAD];