unsigned long size, reserve_pages = 0;
 
        for_each_online_node(nid) {
-               u64 node_kva_target;
-               u64 node_kva_final;
+               u64 node_kva;
 
                /*
                 * The acpi/srat node info can show hot-add memory zones
                /* now the roundup is correct, convert to PAGE_SIZE pages */
                size = size * PTRS_PER_PTE;
 
-               node_kva_target = round_down(node_end_pfn[nid] - size,
-                                                PTRS_PER_PTE);
-               node_kva_target <<= PAGE_SHIFT;
-               do {
-                       node_kva_final = memblock_find_in_range(node_kva_target,
+               node_kva = memblock_find_in_range(((u64)node_start_pfn[nid]) << PAGE_SHIFT,
                                        ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
-                                               ((u64)size)<<PAGE_SHIFT,
-                                               LARGE_PAGE_BYTES);
-                       node_kva_target -= LARGE_PAGE_BYTES;
-               } while (node_kva_final == MEMBLOCK_ERROR &&
-                        (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
-
-               if (node_kva_final == MEMBLOCK_ERROR)
+                                       ((u64)size)<<PAGE_SHIFT,
+                                       LARGE_PAGE_BYTES);
+               if (node_kva == MEMBLOCK_ERROR)
                        panic("Can not get kva ram\n");
 
                node_remap_size[nid] = size;
                reserve_pages += size;
                printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
                                  " node %d at %llx\n",
-                               size, nid, node_kva_final>>PAGE_SHIFT);
+                               size, nid, node_kva >> PAGE_SHIFT);
 
                /*
                 *  Prevent the KVA range (below max_low_pfn) from being
                 *  handed back to the system as free memory.
                 *  So memblock_x86_reserve_range it here; hope we don't run
                 *  out of entries in memblock's reserved array.
                 */
-               memblock_x86_reserve_range(node_kva_final,
-                             node_kva_final+(((u64)size)<<PAGE_SHIFT),
-                             "KVA RAM");
+               memblock_x86_reserve_range(node_kva,
+                                          node_kva + (((u64)size)<<PAGE_SHIFT),
+                                          "KVA RAM");
 
-               node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
+               node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
        }
        printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
                        reserve_pages);
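
Why the deleted do/while was redundant: memblock_find_in_range() already
searches its range top-down, so a single call over the node's whole
[start, end) span returns the highest suitably aligned address that fits,
which is exactly what the loop was approximating one LARGE_PAGE_BYTES step
at a time. The following is a minimal standalone model of that top-down
lookup, for illustration only; it is not the kernel's memblock
implementation, and find_top_down(), struct region, and SKETCH_ERROR are
invented names.

/*
 * Minimal standalone model of a top-down range finder; a sketch for
 * illustration only, not the kernel's memblock implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_ERROR (~(uint64_t)0)        /* stands in for MEMBLOCK_ERROR */

struct region { uint64_t base, size; };    /* one free RAM region */

/*
 * Return the highest @align-aligned address in [start, end) where
 * @size bytes fit inside one of the @nr free regions, or SKETCH_ERROR.
 * One call replaces the removed loop that retried ever-lower targets.
 */
static uint64_t find_top_down(const struct region *avail, int nr,
                              uint64_t start, uint64_t end,
                              uint64_t size, uint64_t align)
{
        uint64_t best = SKETCH_ERROR;

        for (int i = 0; i < nr; i++) {
                uint64_t lo = avail[i].base > start ? avail[i].base : start;
                uint64_t hi = avail[i].base + avail[i].size;

                if (hi > end)
                        hi = end;
                if (hi < lo + size)
                        continue;
                /* highest aligned base that still fits below hi */
                uint64_t cand = (hi - size) & ~(align - 1);

                if (cand >= lo && (best == SKETCH_ERROR || cand > best))
                        best = cand;
        }
        return best;
}

int main(void)
{
        /* example: one free 16MB region at 64MB, 2MB request/alignment */
        struct region avail[] = { { 64u << 20, 16u << 20 } };
        uint64_t kva = find_top_down(avail, 1, 64u << 20, 80u << 20,
                                     2u << 20, 2u << 20);

        if (kva == SKETCH_ERROR)
                return 1;
        printf("top-down candidate: %#llx\n", (unsigned long long)kva);
        return 0;          /* prints 0x4e00000, i.e. 78MB */
}

Note that finding is all this step does: as in the patch, the caller only
owns the range once it claims it, which is what the
memblock_x86_reserve_range() call above is for.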
 void __init initmem_init(void)
 {
        int nid;
-       long kva_target_pfn;
 
        /*
         * When mapping a NUMA machine we allocate the node_mem_map arrays
 
        kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
-       kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-       do {
-               kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
-                                       max_low_pfn<<PAGE_SHIFT,
-                                       kva_pages<<PAGE_SHIFT,
-                                       PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
-               kva_target_pfn -= PTRS_PER_PTE;
-       } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
-
+       kva_start_pfn = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
+                               max_low_pfn << PAGE_SHIFT,
+                               kva_pages << PAGE_SHIFT,
+                               PTRS_PER_PTE << PAGE_SHIFT) >> PAGE_SHIFT;
        if (kva_start_pfn == MEMBLOCK_ERROR)
                panic("Can not get kva space\n");