static LIST_HEAD(failed_ddw_pdn_list);
 
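+/*
+ * Highest RAM address the partition may need mapped: start from the
+ * memory-hotplug ceiling and raise it past the end of every memory@
+ * node currently present in the device tree.
+ */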
+static phys_addr_t ddw_memory_hotplug_max(void)
+{
+       phys_addr_t max_addr = memory_hotplug_max();
+       struct device_node *memory;
+
+       for_each_node_by_type(memory, "memory") {
+               unsigned long start, size;
+               int ranges, n_mem_addr_cells, n_mem_size_cells, len;
+               const __be32 *memcell_buf;
+
+               memcell_buf = of_get_property(memory, "reg", &len);
+               if (!memcell_buf || len <= 0)
+                       continue;
+
+               n_mem_addr_cells = of_n_addr_cells(memory);
+               n_mem_size_cells = of_n_size_cells(memory);
+
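+               /*
+                * Worked example (hypothetical values): with
+                * #address-cells = <2>, #size-cells = <2> and
+                * reg = <0x0 0x0 0x10 0x0>, len is 16 bytes, so
+                * (len >> 2) = 4 cells and 4 / (2 + 2) = 1 range;
+                * of_read_number() folds the big-endian cell pairs into
+                * start = 0x0 and size = 0x1000000000 (64GiB).
+                */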
+               /* number of (addr, size) ranges encoded in "reg"; each cell is 4 bytes */
+               ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
+
+               /* walk every range, not just the first, to find the highest end address */
+               while (ranges--) {
+                       start = of_read_number(memcell_buf, n_mem_addr_cells);
+                       memcell_buf += n_mem_addr_cells;
+                       size = of_read_number(memcell_buf, n_mem_size_cells);
+                       memcell_buf += n_mem_size_cells;
+
+                       max_addr = max_t(phys_addr_t, max_addr, start + size);
+               }
+       }
+
+       return max_addr;
+}
+
 /*
  * If the PE supports dynamic dma windows, and there is space for a table
  * that can map all pages in a linear offset, then setup such a table,
        }
        /* verify the window * number of ptes will map the partition */
        /* check largest block * page size > max memory hotplug addr */
-       max_addr = memory_hotplug_max();
+       max_addr = ddw_memory_hotplug_max();
        if (query.largest_available_block < (max_addr >> page_shift)) {
                dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
                          "%llu-sized pages\n", max_addr,  query.largest_available_block,