#include "debug.h"
 #include <linux/of.h>
 #include <linux/firmware.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
 
 #define SLEEP_CLOCK_SELECT_INTERNAL_BIT        0x02
 #define HOST_CSTATE_BIT                        0x04
         * failure to firmware and firmware then request multiple blocks of
         * small chunk size memory.
         */
-       if (ab->qmi.target_mem_delayed) {
+       if (!test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
+           ab->qmi.target_mem_delayed) {
                delayed = true;
                ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
                           ab->qmi.mem_seg_count);
 {
        struct ath12k_hw_group *ag = ab->ag;
        struct target_mem_chunk *mlo_chunk;
+       bool fixed_mem;
 
        lockdep_assert_held(&ag->mutex);
 
                return;
        }
 
+       fixed_mem = test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
        mlo_chunk = &ag->mlo_mem.chunk[idx];
-       if (mlo_chunk->v.addr) {
+
+       if (fixed_mem && mlo_chunk->v.ioaddr) {
+               iounmap(mlo_chunk->v.ioaddr);
+               mlo_chunk->v.ioaddr = NULL;
+       } else if (mlo_chunk->v.addr) {
                dma_free_coherent(ab->dev,
                                  mlo_chunk->size,
                                  mlo_chunk->v.addr,
 
        mlo_chunk->paddr = 0;
        mlo_chunk->size = 0;
-       chunk->v.addr = NULL;
+       if (fixed_mem)
+               chunk->v.ioaddr = NULL;
+       else
+               chunk->v.addr = NULL;
        chunk->paddr = 0;
        chunk->size = 0;
 }
        int i, mlo_idx;
 
        for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
-               if (!ab->qmi.target_mem[i].v.addr)
-                       continue;
-
                if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
                        ath12k_qmi_free_mlo_mem_chunk(ab,
                                                      &ab->qmi.target_mem[i],
                                                      mlo_idx++);
                } else {
-                       dma_free_coherent(ab->dev,
-                                         ab->qmi.target_mem[i].prev_size,
-                                         ab->qmi.target_mem[i].v.addr,
-                                         ab->qmi.target_mem[i].paddr);
-                       ab->qmi.target_mem[i].v.addr = NULL;
+                       if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
+                           ab->qmi.target_mem[i].v.ioaddr) {
+                               iounmap(ab->qmi.target_mem[i].v.ioaddr);
+                               ab->qmi.target_mem[i].v.ioaddr = NULL;
+                       } else {
+                               if (!ab->qmi.target_mem[i].v.addr)
+                                       continue;
+                               dma_free_coherent(ab->dev,
+                                                 ab->qmi.target_mem[i].prev_size,
+                                                 ab->qmi.target_mem[i].v.addr,
+                                                 ab->qmi.target_mem[i].paddr);
+                               ab->qmi.target_mem[i].v.addr = NULL;
+                       }
                }
        }
 
        return ret;
 }
 
+/* ath12k_qmi_assign_target_mem_chunk() - satisfy the firmware's QMI memory
+ * request from fixed reserved-memory regions (device-tree "reserved-memory"
+ * nodes) instead of dynamically allocated DMA buffers. Used only when
+ * ATH12K_FLAG_FIXED_MEM_REGION is set on the device.
+ *
+ * Returns 0 on success or a negative errno. On any failure, every segment
+ * mapped so far is released via ath12k_qmi_free_target_mem_chunk().
+ */
+static int ath12k_qmi_assign_target_mem_chunk(struct ath12k_base *ab)
+{
+       struct reserved_mem *rmem;
+       size_t avail_rmem_size;
+       int i, idx, ret;
+
+       /* i walks the firmware-requested segments, idx the accepted ones;
+        * unsupported types are dropped, so idx may trail i and the segment
+        * list is compacted in place.
+        */
+       for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
+               switch (ab->qmi.target_mem[i].type) {
+               case HOST_DDR_REGION_TYPE:
+                       /* Reserved-memory region 0 backs host DDR. */
+                       rmem = ath12k_core_get_reserved_mem(ab, 0);
+                       if (!rmem) {
+                               ret = -ENODEV;
+                               goto out;
+                       }
+
+                       avail_rmem_size = rmem->size;
+                       if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+                               ath12k_dbg(ab, ATH12K_DBG_QMI,
+                                          "failed to assign mem type %u req size %u avail size %zu\n",
+                                          ab->qmi.target_mem[i].type,
+                                          ab->qmi.target_mem[i].size,
+                                          avail_rmem_size);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+
+                       /* Fixed regions are ioremap()ed, not DMA-allocated;
+                        * the free path must use iounmap() on v.ioaddr.
+                        */
+                       ab->qmi.target_mem[idx].paddr = rmem->base;
+                       ab->qmi.target_mem[idx].v.ioaddr =
+                               ioremap(ab->qmi.target_mem[idx].paddr,
+                                       ab->qmi.target_mem[i].size);
+                       if (!ab->qmi.target_mem[idx].v.ioaddr) {
+                               ret = -EIO;
+                               goto out;
+                       }
+                       ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+                       ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+                       idx++;
+                       break;
+               case BDF_MEM_REGION_TYPE:
+                       /* BDF lives inside region 0 at a hw-specific offset
+                        * past the host DDR area.
+                        */
+                       rmem = ath12k_core_get_reserved_mem(ab, 0);
+                       if (!rmem) {
+                               ret = -ENODEV;
+                               goto out;
+                       }
+
+                       avail_rmem_size = rmem->size - ab->hw_params->bdf_addr_offset;
+                       if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+                               ath12k_dbg(ab, ATH12K_DBG_QMI,
+                                          "failed to assign mem type %u req size %u avail size %zu\n",
+                                          ab->qmi.target_mem[i].type,
+                                          ab->qmi.target_mem[i].size,
+                                          avail_rmem_size);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       ab->qmi.target_mem[idx].paddr =
+                               rmem->base + ab->hw_params->bdf_addr_offset;
+                       ab->qmi.target_mem[idx].v.ioaddr =
+                               ioremap(ab->qmi.target_mem[idx].paddr,
+                                       ab->qmi.target_mem[i].size);
+                       if (!ab->qmi.target_mem[idx].v.ioaddr) {
+                               ret = -EIO;
+                               goto out;
+                       }
+                       ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+                       ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+                       idx++;
+                       break;
+               case CALDB_MEM_REGION_TYPE:
+                       /* Cold boot calibration is not enabled in Ath12k. Hence,
+                        * assign paddr = 0.
+                        * Once cold boot calibration is enabled add support to
+                        * assign reserved memory from DT.
+                        */
+                       ab->qmi.target_mem[idx].paddr = 0;
+                       ab->qmi.target_mem[idx].v.ioaddr = NULL;
+                       ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+                       ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+                       idx++;
+                       break;
+               case M3_DUMP_REGION_TYPE:
+                       /* M3 firmware dump uses a separate reserved-memory
+                        * region (index 1).
+                        */
+                       rmem = ath12k_core_get_reserved_mem(ab, 1);
+                       if (!rmem) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+
+                       avail_rmem_size = rmem->size;
+                       if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+                               ath12k_dbg(ab, ATH12K_DBG_QMI,
+                                          "failed to assign mem type %u req size %u avail size %zu\n",
+                                          ab->qmi.target_mem[i].type,
+                                          ab->qmi.target_mem[i].size,
+                                          avail_rmem_size);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+
+                       ab->qmi.target_mem[idx].paddr = rmem->base;
+                       ab->qmi.target_mem[idx].v.ioaddr =
+                               ioremap(ab->qmi.target_mem[idx].paddr,
+                                       ab->qmi.target_mem[i].size);
+                       if (!ab->qmi.target_mem[idx].v.ioaddr) {
+                               ret = -EIO;
+                               goto out;
+                       }
+                       ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+                       ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+                       idx++;
+                       break;
+               default:
+                       /* Unknown types are skipped (not fatal); firmware is
+                        * told about only the idx segments actually mapped.
+                        */
+                       ath12k_warn(ab, "qmi ignore invalid mem req type %u\n",
+                                   ab->qmi.target_mem[i].type);
+                       break;
+               }
+       }
+       /* Report only the compacted, successfully mapped segment count. */
+       ab->qmi.mem_seg_count = idx;
+
+       return 0;
+out:
+       /* Unwind all mappings made so far (iounmap for fixed regions). */
+       ath12k_qmi_free_target_mem_chunk(ab);
+       return ret;
+}
+
 /* clang stack usage explodes if this is inlined */
 static noinline_for_stack
 int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
                           msg->mem_seg[i].type, msg->mem_seg[i].size);
        }
 
-       ret = ath12k_qmi_alloc_target_mem_chunk(ab);
-       if (ret) {
-               ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
-                           ret);
-               return;
+       if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags)) {
+               ret = ath12k_qmi_assign_target_mem_chunk(ab);
+               if (ret) {
+                       ath12k_warn(ab, "failed to assign qmi target memory: %d\n",
+                                   ret);
+                       return;
+               }
+       } else {
+               ret = ath12k_qmi_alloc_target_mem_chunk(ab);
+               if (ret) {
+                       ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
+                                   ret);
+                       return;
+               }
        }
 
        ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_REQUEST_MEM, NULL);