return ret;
 }
 
+/*
+ * Returns true if the given range overlaps with any of the reserved
+ * memory ranges, starting the search at index *idx, and updates *idx
+ * to the index of the overlapping range. Returns false otherwise.
+ */
+static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
+{
+       bool ret = false;
+       int i;
+
+       for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
+               u64 rbase = reserved_mrange_info.mem_ranges[i].base;
+               u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
+
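+               /*
+                * Reserved ranges are assumed to be sorted by base address,
+                * so once a range starts at or beyond 'end', none of the
+                * remaining ranges can overlap with [base, end).
+                */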
+               if (end <= rbase)
+                       break;
+
+               if ((end > rbase) && (base < rend)) {
+                       *idx = i;
+                       ret = true;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Locate a suitable memory area to reserve memory for FADump. While at it,
+ * look up reserved-ranges and avoid overlapping with them, as they are
+ * used by F/W.
+ */
+static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
+{
+       struct fadump_memory_range *mrngs;
+       phys_addr_t mstart, mend;
+       int idx = 0;
+       u64 i, ret = 0;
+
+       mrngs = reserved_mrange_info.mem_ranges;
+       for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+                               &mstart, &mend, NULL) {
+               pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
+                        i, mstart, mend, base);
+
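+               /*
+                * If this free range starts above the current candidate
+                * base, move the candidate up to its page-aligned start.
+                */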
+               if (mstart > base)
+                       base = PAGE_ALIGN(mstart);
+
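+               /*
+                * Try candidate bases within this free range for as long
+                * as 'size' bytes still fit below its end.
+                */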
+               while ((mend > base) && ((mend - base) >= size)) {
+                       if (!overlaps_reserved_ranges(base, base + size, &idx)) {
+                               ret = base;
+                               goto out;
+                       }
+
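+                       /*
+                        * Candidate overlaps a reserved range; retry from
+                        * just past the end of that overlapping range.
+                        */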
+                       base = mrngs[idx].base + mrngs[idx].size;
+                       base = PAGE_ALIGN(base);
+               }
+       }
+
+out:
+       return ret;
+}
+
 int __init fadump_reserve_mem(void)
 {
-       u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE;
-       bool is_memblock_bottom_up = memblock_bottom_up();
+       u64 base, size, mem_boundary, bootmem_min;
        int ret = 1;
 
        if (!fw_dump.fadump_enabled)
                        PAGE_ALIGN(fadump_calculate_reserve_size());
 #ifdef CONFIG_CMA
                if (!fw_dump.nocma) {
-                       align = FADUMP_CMA_ALIGNMENT;
                        fw_dump.boot_memory_size =
-                               ALIGN(fw_dump.boot_memory_size, align);
+                               ALIGN(fw_dump.boot_memory_size,
+                                     FADUMP_CMA_ALIGNMENT);
                }
 #endif
 
                 * Reserve memory at an offset closer to bottom of the RAM to
                 * minimize the impact of memory hot-remove operation.
                 */
-               memblock_set_bottom_up(true);
-               base = memblock_find_in_range(base, mem_boundary, size, align);
-
-               /* Restore the previous allocation mode */
-               memblock_set_bottom_up(is_memblock_bottom_up);
+               base = fadump_locate_reserve_mem(base, size);
 
                if (!base) {
                        pr_err("Failed to find memory chunk for reservation!\n");