+++ /dev/null
-#ifndef _X86_MEMBLOCK_H
-#define _X86_MEMBLOCK_H
-
-void memblock_x86_reserve_range(u64 start, u64 end, char *name);
-void memblock_x86_free_range(u64 start, u64 end);
-
-#endif
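
The header above declared the x86-only wrappers, which took an exclusive
end address plus a label string; the rest of this patch converts every
caller to the generic memblock interface, which takes a base and a size.
A standalone sketch of the conversion (illustrative userspace model, not
the kernel functions):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* models the generic call the patch switches to */
static void reserve_model(phys_addr_t base, phys_addr_t size)
{
        printf("reserve [%#llx-%#llx]\n",
               (unsigned long long)base,
               (unsigned long long)base + size - 1);
}

int main(void)
{
        phys_addr_t lowmem = 0x9f000;

        /* old: memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved")
         * new: memblock_reserve(lowmem, 0x100000 - lowmem) */
        reserve_model(lowmem, 0x100000 - lowmem);
        return 0;
}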
 
                                addr, aper_size>>10);
                return 0;
        }
-       memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
+       memblock_reserve(addr, aper_size);
        /*
         * Kmemleak should not scan this block as it may not be mapped via the
         * kernel direct mapping.
 
                if (start >= end)
                        continue;
 
-               memblock_x86_reserve_range(start, end, "SCAN RAM");
+               memblock_reserve(start, end - start);
                scan_areas[num_scan_areas].addr = start;
                scan_areas[num_scan_areas].size = end - start;
 
 
                lowmem = 0x9f000;
 
        /* reserve all memory between lowmem and the 1MB mark */
-       memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
+       memblock_reserve(lowmem, 0x100000 - lowmem);
 }
 
 {
        memblock_init();
 
-       memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+       memblock_reserve(__pa_symbol(&_text),
+                        __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /* Reserve INITRD */
                u64 ramdisk_image = boot_params.hdr.ramdisk_image;
                u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
                u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-               memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+               memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
        }
 #endif
 
 
 
        memblock_init();
 
-       memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+       memblock_reserve(__pa_symbol(&_text),
+                        __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /* Reserve INITRD */
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-               memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+               memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
        }
 #endif
 
 
 
 static void __init smp_reserve_memory(struct mpf_intel *mpf)
 {
-       unsigned long size = get_mpc_size(mpf->physptr);
-
-       memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc");
+       memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
 }
 
 static int __init smp_scan_config(unsigned long base, unsigned long length)
                               mpf, (u64)virt_to_phys(mpf));
 
                        mem = virt_to_phys(mpf);
-                       memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf");
+                       memblock_reserve(mem, sizeof(*mpf));
                        if (mpf->physptr)
                                smp_reserve_memory(mpf);
 
 
 static void __init reserve_brk(void)
 {
        if (_brk_end > _brk_start)
-               memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK");
+               memblock_reserve(__pa(_brk_start),
+                                __pa(_brk_end) - __pa(_brk_start));
 
        /* Mark brk area as locked down and no longer taking any
           new allocations */
 
        /* Note: this includes all the lowmem currently occupied by
           the initrd, we rely on that fact to keep the data intact. */
-       memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
+       memblock_reserve(ramdisk_here, area_size);
        initrd_start = ramdisk_here + PAGE_OFFSET;
        initrd_end   = initrd_start + ramdisk_size;
        printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
        initrd_start = 0;
 
        if (ramdisk_size >= (end_of_lowmem>>1)) {
-               memblock_x86_free_range(ramdisk_image, ramdisk_end);
+               memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
                printk(KERN_ERR "initrd too large to handle, "
                       "disabling initrd\n");
                return;
 
        relocate_initrd();
 
-       memblock_x86_free_range(ramdisk_image, ramdisk_end);
+       memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 }
 #else
 static void __init reserve_initrd(void)
 {
        struct setup_data *data;
        u64 pa_data;
-       char buf[32];
 
        if (boot_params.hdr.version < 0x0209)
                return;
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_memremap(pa_data, sizeof(*data));
-               sprintf(buf, "setup data %x", data->type);
-               memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
+               memblock_reserve(pa_data, sizeof(*data) + data->len);
                pa_data = data->next;
                early_iounmap(data, sizeof(*data));
        }
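
The loop above reserves each setup_data node as header plus payload
(sizeof(*data) + data->len) and then follows the physical next pointer.
A standalone sketch of that walk (types are illustrative stand-ins for
the boot-protocol structs, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct setup_data_model {
        uint64_t next;  /* "physical" address of next node; 0 ends the list */
        uint32_t type;
        uint32_t len;   /* payload bytes following the header */
};

int main(void)
{
        uint8_t mem[256] = {0};         /* fake physical memory */
        struct setup_data_model a = { .next = 64, .type = 1, .len = 8 };
        struct setup_data_model b = { .next = 0,  .type = 2, .len = 24 };
        uint64_t pa_data = 16;

        memcpy(mem + 16, &a, sizeof(a));
        memcpy(mem + 64, &b, sizeof(b));

        while (pa_data) {
                struct setup_data_model d;

                memcpy(&d, mem + pa_data, sizeof(d));
                printf("reserve [%#llx, +%llu)\n",
                       (unsigned long long)pa_data,
                       (unsigned long long)(sizeof(d) + d.len));
                pa_data = d.next;
        }
        return 0;
}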
                        return;
                }
        }
-       memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL");
+       memblock_reserve(crash_base, crash_size);
 
        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                        "for crashkernel (System RAM: %ldMB)\n",
        addr = find_ibft_region(&size);
 
        if (size)
-               memblock_x86_reserve_range(addr, addr + size, "* ibft");
+               memblock_reserve(addr, size);
 }
 
 static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
 
                panic("Cannot allocate trampoline\n");
 
        x86_trampoline_base = __va(mem);
-       memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE");
+       memblock_reserve(mem, size);
 
        printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
               x86_trampoline_base, (unsigned long long)mem, size);
 
 obj-$(CONFIG_ACPI_NUMA)                += srat.o
 obj-$(CONFIG_NUMA_EMU)         += numa_emulation.o
 
-obj-$(CONFIG_HAVE_MEMBLOCK)            += memblock.o
-
 obj-$(CONFIG_MEMTEST)          += memtest.o
 
 
 void __init native_pagetable_reserve(u64 start, u64 end)
 {
-       memblock_x86_reserve_range(start, end, "PGTABLE");
+       memblock_reserve(start, end - start);
 }
 
 struct map_range {
         * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
         * so that they can be reused for other purposes.
         *
-        * On native it just means calling memblock_x86_reserve_range, on Xen it
-        * also means marking RW the pagetable pages that we allocated before
+        * On native it just means calling memblock_reserve, on Xen it also
+        * means marking RW the pagetable pages that we allocated before
         * but that haven't been used.
         *
         * In fact on xen we mark RO the whole range pgt_buf_start -
 
+++ /dev/null
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/range.h>
-
-void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
-{
-       if (start == end)
-               return;
-
-       if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
-               return;
-
-       memblock_dbg("    memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);
-
-       memblock_reserve(start, end - start);
-}
-
-void __init memblock_x86_free_range(u64 start, u64 end)
-{
-       if (start == end)
-               return;
-
-       if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
-               return;
-
-       memblock_dbg("       memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);
-
-       memblock_free(start, end - start);
-}
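
Note that these deleted wrappers also silently ignored empty ranges and
WARNed on inverted ones; the generic memblock_reserve()/memblock_free()
do neither, so any call site that could compute start >= end now needs
its own guard. A minimal standalone model of the dropped checks
(illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

static void reserve_range_checked(phys_addr_t start, phys_addr_t end)
{
        if (start == end)
                return;                 /* empty range: silently ignored */
        assert(start < end);            /* models the old WARN_ONCE */
        /* memblock_reserve(start, end - start); */
}

int main(void)
{
        reserve_range_checked(0x1000, 0x1000);  /* no-op */
        reserve_range_checked(0x1000, 0x2000);
        return 0;
}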
 
               (unsigned long long) pattern,
               (unsigned long long) start_bad,
               (unsigned long long) end_bad);
-       memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM");
+       memblock_reserve(start_bad, end_bad - start_bad);
 }
 
 static void __init memtest(u64 pattern, u64 start_phys, u64 size)
 
 
        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
-               memblock_x86_free_range(__pa(numa_distance),
-                                       __pa(numa_distance) + size);
+               memblock_free(__pa(numa_distance), size);
        numa_distance_cnt = 0;
        numa_distance = NULL;   /* enable table creation */
 }
                numa_distance = (void *)1LU;
                return -ENOMEM;
        }
-       memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
+       memblock_reserve(phys, size);
 
        numa_distance = __va(phys);
        numa_distance_cnt = cnt;
 
                           size, nid);
                return;
        }
-       memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
+       memblock_reserve(node_pa, size);
 
        remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
                                          max_low_pfn << PAGE_SHIFT,
        if (!remap_pa) {
                pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
                           size, nid);
-               memblock_x86_free_range(node_pa, node_pa + size);
+               memblock_free(node_pa, size);
                return;
        }
-       memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
+       memblock_reserve(remap_pa, size);
        remap_va = phys_to_virt(remap_pa);
 
        /* perform actual remap */
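
The two hunks above show the find/reserve/rollback idiom: search for a
free window, reserve it, and release the first reservation if a later
step fails (memblock_find_in_range() returns 0 on failure here, per the
!remap_pa check). A standalone sketch of that flow, with a toy allocator
standing in for the kernel calls:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* toy stand-in: hands out the start of the window, 0 if it can't fit */
static phys_addr_t find_in_range_model(phys_addr_t start, phys_addr_t end,
                                       phys_addr_t size)
{
        return (end - start >= size) ? start : 0;
}

int main(void)
{
        phys_addr_t size = 0x4000;
        phys_addr_t node_pa, remap_pa;

        node_pa = find_in_range_model(0x100000, 0x200000, size);
        if (!node_pa)
                return 1;
        printf("reserve node_pa %#llx\n", (unsigned long long)node_pa);

        remap_pa = find_in_range_model(0x300000, 0x300000, size);
        if (!remap_pa) {
                /* rollback, as memblock_free(node_pa, size) does above */
                printf("free    node_pa %#llx\n", (unsigned long long)node_pa);
                return 1;
        }
        printf("reserve remap_pa %#llx\n", (unsigned long long)remap_pa);
        return 0;
}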
 
                        pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
                        goto no_emu;
                }
-               memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
+               memblock_reserve(phys, phys_size);
                phys_dist = __va(phys);
 
                for (i = 0; i < numa_dist_cnt; i++)
 
        /* free the copied physical distance table */
        if (phys_dist)
-               memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
+               memblock_free(__pa(phys_dist), phys_size);
        return;
 
 no_emu:
 
                boot_params.efi_info.efi_memdesc_size;
        memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
        memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
-       memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size,
-                     "EFI memmap");
+       memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
 }
 
 #if EFI_DEBUG
                                        "[0x%010llx-0x%010llx]\n",
                                                start, start+size-1);
                } else
-                       memblock_x86_reserve_range(start, start+size,
-                                                       "EFI Boot");
+                       memblock_reserve(start, size);
        }
 }
 
 
        __xen_write_cr3(true, __pa(pgd));
        xen_mc_issue(PARAVIRT_LAZY_CPU);
 
-       memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
-                     __pa(xen_start_info->pt_base +
-                          xen_start_info->nr_pt_frames * PAGE_SIZE),
-                     "XEN PAGETABLES");
+       memblock_reserve(__pa(xen_start_info->pt_base),
+                        xen_start_info->nr_pt_frames * PAGE_SIZE);
 
        return pgd;
 }
                          PFN_DOWN(__pa(initial_page_table)));
        xen_write_cr3(__pa(initial_page_table));
 
-       memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
-                     __pa(xen_start_info->pt_base +
-                          xen_start_info->nr_pt_frames * PAGE_SIZE),
-                     "XEN PAGETABLES");
+       memblock_reserve(__pa(xen_start_info->pt_base),
+                        xen_start_info->nr_pt_frames * PAGE_SIZE);
 
        return initial_page_table;
 }
 
        e820_add_region(extra_start, size, E820_RAM);
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
-       memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+       memblock_reserve(extra_start, size);
 
        xen_extra_mem_size += size;
 
         *  - xen_start_info
         * See comment above "struct start_info" in <xen/interface/xen.h>
         */
-       memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
-                     __pa(xen_start_info->pt_base),
-                       "XEN START INFO");
+       memblock_reserve(__pa(xen_start_info->mfn_list),
+                        xen_start_info->pt_base - xen_start_info->mfn_list);
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
 
 #include <linux/init.h>
 #include <linux/mm.h>
 
-#include <asm/memblock.h>
-
 #define INIT_MEMBLOCK_REGIONS  128
 
 struct memblock_region {
 
 
 long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 {
+       memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
+                    (unsigned long long)base,
+                    (unsigned long long)base + size - 1,
+                    (void *)_RET_IP_);
+
        return __memblock_remove(&memblock.reserved, base, size);
 }
 
 {
        struct memblock_type *_rgn = &memblock.reserved;
 
+       memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
+                    (unsigned long long)base,
+                    (unsigned long long)base + size - 1,
+                    (void *)_RET_IP_);
        BUG_ON(0 == size);
 
        return memblock_add_region(_rgn, base, size);
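
With the name argument gone, the generic functions identify the caller
via _RET_IP_, which %pF resolves to a symbol, and the output is gated by
the existing memblock=debug boot parameter (memblock_dbg() prints only
when memblock debugging is enabled). A standalone model of the
return-address trick (GCC/Clang builtin; illustrative only):

#include <stdio.h>

/* noinline so the builtin sees the real call site's return address */
static void __attribute__((noinline))
memblock_reserve_model(unsigned long long base, unsigned long long size)
{
        /* _RET_IP_ in the kernel is the caller's return address */
        printf("memblock_reserve: [%#016llx-%#016llx] caller=%p\n",
               base, base + size - 1, __builtin_return_address(0));
}

int main(void)
{
        memblock_reserve_model(0x100000, 0x1000);
        return 0;
}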
 
 
        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
-       memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+       memblock_reserve(addr, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
                              unsigned long size)
 {
        kmemleak_free_part(__va(physaddr), size);
-       memblock_x86_free_range(physaddr, physaddr + size);
+       memblock_free(physaddr, size);
 }
 
 /**
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
        kmemleak_free_part(__va(addr), size);
-       memblock_x86_free_range(addr, addr + size);
+       memblock_free(addr, size);
 }
 
 static void * __init ___alloc_bootmem_nopanic(unsigned long size,