#include <uapi/scsi/scsi_bsg_mpi3mr.h>
 
 /**
- * mpi3mr_alloc_trace_buffer:  Allocate trace buffer
+ * mpi3mr_alloc_trace_buffer: Allocate segmented trace buffer
  * @mrioc: Adapter instance reference
  * @trace_size: Trace buffer size
  *
- * Allocate trace buffer
+ * Allocate either segmented memory pools or contiguous buffer
+ * based on the controller capability for the host trace
+ * buffer.
+ *
  * Return: 0 on success, non-zero on failure.
  */
 static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
 {
        struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
+       int i, sz;
+       u64 *diag_buffer_list = NULL;
+       dma_addr_t diag_buffer_list_dma;
+       u32 seg_count;
+
+       if (mrioc->seg_tb_support) {
+               seg_count = (trace_size) / MPI3MR_PAGE_SIZE_4K;
+               trace_size = seg_count * MPI3MR_PAGE_SIZE_4K;
+
+               diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev,
+                               sizeof(u64) * seg_count,
+                               &diag_buffer_list_dma, GFP_KERNEL);
+               if (!diag_buffer_list)
+                       return -1;
+
+               mrioc->num_tb_segs = seg_count;
+
+               sz = sizeof(struct segments) * seg_count;
+               mrioc->trace_buf = kzalloc(sz, GFP_KERNEL);
+               if (!mrioc->trace_buf)
+                       goto trace_buf_failed;
+
+               mrioc->trace_buf_pool = dma_pool_create("trace_buf pool",
+                   &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K,
+                   0);
+               if (!mrioc->trace_buf_pool) {
+                       ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n");
+                       goto trace_buf_pool_failed;
+               }
 
-       diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
-           trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
-       if (diag_buffer->addr) {
-               dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+               for (i = 0; i < seg_count; i++) {
+                       mrioc->trace_buf[i].segment =
+                           dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL,
+                           &mrioc->trace_buf[i].segment_dma);
+                       diag_buffer_list[i] =
+                           (u64) mrioc->trace_buf[i].segment_dma;
+                       if (!diag_buffer_list[i])
+                               goto tb_seg_alloc_failed;
+               }
+
+               diag_buffer->addr =  diag_buffer_list;
+               diag_buffer->dma_addr = diag_buffer_list_dma;
+               diag_buffer->is_segmented = true;
+
+               dprint_init(mrioc, "segmented trace diag buffer\n"
+                               "is allocated successfully seg_count:%d\n", seg_count);
                return 0;
+       } else {
+               diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+                   trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
+               if (diag_buffer->addr) {
+                       dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+                       return 0;
+               }
+               return -1;
        }
+
+tb_seg_alloc_failed:
+       if (mrioc->trace_buf_pool) {
+               for (i = 0; i < mrioc->num_tb_segs; i++) {
+                       if (mrioc->trace_buf[i].segment) {
+                               dma_pool_free(mrioc->trace_buf_pool,
+                                   mrioc->trace_buf[i].segment,
+                                   mrioc->trace_buf[i].segment_dma);
+                               mrioc->trace_buf[i].segment = NULL;
+                       }
+                       mrioc->trace_buf[i].segment = NULL;
+               }
+               dma_pool_destroy(mrioc->trace_buf_pool);
+               mrioc->trace_buf_pool = NULL;
+       }
+trace_buf_pool_failed:
+       kfree(mrioc->trace_buf);
+       mrioc->trace_buf = NULL;
+trace_buf_failed:
+       if (diag_buffer_list)
+               dma_free_coherent(&mrioc->pdev->dev,
+                   sizeof(u64) * mrioc->num_tb_segs,
+                   diag_buffer_list, diag_buffer_list_dma);
        return -1;
 }
 
                        dprint_init(mrioc,
                            "trying to allocate trace diag buffer of size = %dKB\n",
                            trace_size / 1024);
-               if (get_order(trace_size) > MAX_PAGE_ORDER ||
+               if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) ||
                    mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
                        retry = true;
                        trace_size -= trace_dec_size;
                        dprint_init(mrioc, "trace diag buffer allocation failed\n"
        u8 prev_status;
        int retval = 0;
 
+       if (diag_buffer->disabled_after_reset) {
+               dprint_bsg_err(mrioc,
+                   "%s: skipping diag buffer posting as it is disabled after reset\n",
+                   __func__);
+               return -1;
+       }
+
        memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
        mutex_lock(&mrioc->init_cmds.mutex);
        if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
        diag_buf_post_req.address = le64_to_cpu(diag_buffer->dma_addr);
        diag_buf_post_req.length = le32_to_cpu(diag_buffer->size);
 
-       dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
-           diag_buffer->type);
+       if (diag_buffer->is_segmented)
+               diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED;
+
+       dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__,
+           diag_buffer->type, diag_buffer->is_segmented);
+
        prev_status = diag_buffer->status;
        diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
        init_completion(&mrioc->init_cmds.done);
 
              (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
                retval = 0;
 
-       ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+       ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
            (!retval) ? "successful" : "failed", ioc_status, ioc_config);
        return retval;
 }
                    "\tcontroller while sas transport support is enabled at the\n"
                    "\tdriver, please reboot the system or reload the driver\n");
 
+       if (mrioc->seg_tb_support) {
+               if (!(mrioc->facts.ioc_capabilities &
+                    MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
+                       ioc_err(mrioc,
+                           "critical error: previously enabled segmented trace\n"
+                           " buffer capability is disabled after reset. Please\n"
+                           " update the firmware or reboot the system or\n"
+                           " reload the driver to enable trace diag buffer\n");
+                       mrioc->diag_buffers[0].disabled_after_reset = true;
+               } else
+                       mrioc->diag_buffers[0].disabled_after_reset = false;
+       }
+
        if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
                removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
                                                  GFP_KERNEL);
        ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
        ioc_status = readl(&mrioc->sysif_regs->ioc_status);
        ioc_info(mrioc,
-           "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
+           "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
            (!retval)?"successful":"failed", ioc_status,
            ioc_config);
        if (retval)
        if (mrioc->facts.max_req_limit)
                mrioc->prevent_reply_qfull = true;
 
+       if (mrioc->facts.ioc_capabilities &
+               MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
+               mrioc->seg_tb_support = true;
+
        mrioc->reply_sz = mrioc->facts.reply_sz;
 
        retval = mpi3mr_check_reset_dma_mask(mrioc);
  */
 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
 {
-       u16 i;
+       u16 i, j;
        struct mpi3mr_intr_info *intr_info;
        struct diag_buffer_desc *diag_buffer;
 
 
        for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
                diag_buffer = &mrioc->diag_buffers[i];
+               if ((i == 0) && mrioc->seg_tb_support) {
+                       if (mrioc->trace_buf_pool) {
+                               for (j = 0; j < mrioc->num_tb_segs; j++) {
+                                       if (mrioc->trace_buf[j].segment) {
+                                               dma_pool_free(mrioc->trace_buf_pool,
+                                                   mrioc->trace_buf[j].segment,
+                                                   mrioc->trace_buf[j].segment_dma);
+                                               mrioc->trace_buf[j].segment = NULL;
+                                       }
+
+                                       mrioc->trace_buf[j].segment = NULL;
+                               }
+                               dma_pool_destroy(mrioc->trace_buf_pool);
+                               mrioc->trace_buf_pool = NULL;
+                       }
+
+                       kfree(mrioc->trace_buf);
+                       mrioc->trace_buf = NULL;
+                       diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
+               }
                if (diag_buffer->addr) {
                        dma_free_coherent(&mrioc->pdev->dev,
                            diag_buffer->size, diag_buffer->addr,
        }
 
        ioc_info(mrioc,
-           "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+           "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
            (!retval) ? "successful" : "failed", ioc_status,
            ioc_config);
 }