return 0;
 }
 
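+/**
+ * ufshcd_mcq_memory_alloc - allocate SQE and CQE rings for each hardware queue
+ * @hba: per adapter instance
+ *
+ * The rings are device-managed (dmam_alloc_coherent()), so no explicit
+ * release path is needed.
+ *
+ * Return: 0 upon success; -ENOMEM upon failure.
+ */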
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
+{
+       struct ufs_hw_queue *hwq;
+       size_t utrdl_size, cqe_size;
+       int i;
+
+       for (i = 0; i < hba->nr_hw_queues; i++) {
+               hwq = &hba->uhq[i];
+
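+               /* Each SQE is a UTP transfer request descriptor */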
+               utrdl_size = sizeof(struct utp_transfer_req_desc) *
+                            hwq->max_entries;
+               hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
+                                                        &hwq->sqe_dma_addr,
+                                                        GFP_KERNEL);
+               if (!hwq->sqe_base_addr) {
+                       dev_err(hba->dev, "SQE allocation failed\n");
+                       return -ENOMEM;
+               }
+
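+               /* One 32-byte completion entry per submission slot */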
+               cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
+               hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
+                                                        &hwq->cqe_dma_addr,
+                                                        GFP_KERNEL);
+               if (!hwq->cqe_base_addr) {
+                       dev_err(hba->dev, "CQE allocation failed\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
 int ufshcd_mcq_init(struct ufs_hba *hba)
 {
-       int ret;
+       struct ufs_hw_queue *hwq;
+       int ret, i;
 
        ret = ufshcd_mcq_config_nr_queues(hba);
        if (ret)
                return ret;
 
        ret = ufshcd_vops_mcq_config_resource(hba);
-       return ret;
+       if (ret)
+               return ret;
+
+       hba->uhq = devm_kcalloc(hba->dev, hba->nr_hw_queues,
+                               sizeof(struct ufs_hw_queue), GFP_KERNEL);
+       if (!hba->uhq) {
+               dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
+               return -ENOMEM;
+       }
+
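+       /* Default every queue to the full transfer request depth */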
+       for (i = 0; i < hba->nr_hw_queues; i++) {
+               hwq = &hba->uhq[i];
+               hwq->max_entries = hba->nutrs;
+       }
+
+       /* The very first HW queue serves device commands */
+       hba->dev_cmd_queue = &hba->uhq[0];
+       /* Give dev_cmd_queue the minimal number of entries */
+       hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
+
+       return 0;
 }
 
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
 int ufshcd_mcq_init(struct ufs_hba *hba);
 int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
 
 #define SD_ASCII_STD true
 #define SD_RAW false
 
                goto out;
        }
 
+       /*
+        * Skip utmrdl allocation; it may have been allocated during the
+        * first pass and not released during MCQ memory allocation.
+        * See ufshcd_release_sdb_queue() and ufshcd_config_mcq().
+        */
+       if (hba->utmrdl_base_addr)
+               goto skip_utmrdl;
        /*
         * Allocate memory for UTP Task Management descriptors
         * UFSHCI requires 1024 byte alignment of UTMRD
                goto out;
        }
 
+skip_utmrdl:
        /* Allocate memory for local reference block */
        hba->lrb = devm_kcalloc(hba->dev,
                                hba->nutrs, sizeof(struct ufshcd_lrb),
        return ret;
 }
 
+/*
+ * Release DMA memory and the lrb array sized for the SDB (Single Doorbell)
+ * queue so they can be reallocated at the MCQ queue depth.
+ */
+static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
+{
+       size_t ucdl_size, utrdl_size;
+
+       ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
+       dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
+                          hba->ucdl_dma_addr);
+
+       utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
+       dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
+                          hba->utrdl_dma_addr);
+
+       devm_kfree(hba->dev, hba->lrb);
+}
+
 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
 {
        int ret;
 
        hba->nutrs = ret;
        ret = ufshcd_mcq_init(hba);
-       if (ret) {
-               hba->nutrs = old_nutrs;
-               return ret;
+       if (ret)
+               goto err;
+
+       /*
+        * Previously allocated memory for nutrs may not be enough in MCQ mode.
+        * The number of supported tags in MCQ mode may be larger than in SDB
+        * mode.
+        */
+       if (hba->nutrs != old_nutrs) {
+               ufshcd_release_sdb_queue(hba, old_nutrs);
+               ret = ufshcd_memory_alloc(hba);
+               if (ret)
+                       goto err;
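+               /* Re-point lrbs and UTRDs at the newly allocated memory */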
+               ufshcd_host_memory_configure(hba);
        }
 
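+       /* Allocate per-queue SQE/CQE rings sized in ufshcd_mcq_init() */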
+       ret = ufshcd_mcq_memory_alloc(hba);
+       if (ret)
+               goto err;
+
        return 0;
+err:
+       hba->nutrs = old_nutrs;
+       return ret;
 }
 
 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
 
  * @mcq_sup: is mcq supported by UFSHC
  * @res: array of resource info of MCQ registers
  * @mcq_base: Multi circular queue registers base address
+ * @uhq: array of supported hardware queues
+ * @dev_cmd_queue: Queue for issuing device management commands
  */
 struct ufs_hba {
        void __iomem *mmio_base;
        bool mcq_sup;
        struct ufshcd_res_info res[RES_MAX];
        void __iomem *mcq_base;
+       struct ufs_hw_queue *uhq;
+       struct ufs_hw_queue *dev_cmd_queue;
+};
+
+/**
+ * struct ufs_hw_queue - per hardware queue structure
+ * @sqe_base_addr: submission queue entry base address
+ * @sqe_dma_addr: submission queue dma address
+ * @cqe_base_addr: completion queue base address
+ * @cqe_dma_addr: completion queue dma address
+ * @max_entries: max number of slots in this hardware queue
+ */
+struct ufs_hw_queue {
+       void *sqe_base_addr;
+       dma_addr_t sqe_dma_addr;
+       struct cq_entry *cqe_base_addr;
+       dma_addr_t cqe_dma_addr;
+       u32 max_entries;
 };
 
 #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
 
        __le16  prd_table_offset;
 };
 
+/* MCQ Completion Queue Entry */
+struct cq_entry {
+       /* DW 0-1 */
+       __le64 command_desc_base_addr;
+
+       /* DW 2 */
+       __le16  response_upiu_length;
+       __le16  response_upiu_offset;
+
+       /* DW 3 */
+       __le16  prd_table_length;
+       __le16  prd_table_offset;
+
+       /* DW 4 */
+       __le32 status;
+
+       /* DW 5-7 */
+       __le32 reserved[3];
+};
+
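+/* Guard against padding: the CQE must be exactly 32 bytes (DW 0-7) */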
+static_assert(sizeof(struct cq_entry) == 32);
+
 /*
  * UTMRD structure.
  */