unsigned long size;
 
                        size = initrd_end - initrd_start;
-                       free_bootmem_node(NODE_DATA(0), __pa(initrd_start),
-                                         PAGE_ALIGN(size));
+                       memblock_free(__pa(initrd_start), PAGE_ALIGN(size));
                        if (!move_initrd(pci_mem))
                                printk("irongate_init_arch: initrd too big "
                                       "(%ldK)\ndisabling initrd\n",
 
         * memmap array.
         */
        if (pg < pgend)
-               free_bootmem(pg, pgend - pg);
+               memblock_free(pg, pgend - pg);
 }
 
 /*
 
                extern void show_kernel_relocation(const char *level);
 
                offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
-               free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
+               memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
 
 #if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
                /*
 
 
 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-       free_bootmem(__pa(ptr), size);
+       memblock_free(__pa(ptr), size);
 }
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 
 
 static void __init pcpu_free_bootmem(void *ptr, size_t size)
 {
-       free_bootmem(__pa(ptr), size);
+       memblock_free(__pa(ptr), size);
 }
 
 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 
 #include <linux/stddef.h>
 #include <linux/module.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
-       free_bootmem(__pa(brk_end), uml_reserved - brk_end);
+       memblock_free(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;
 
        /* this will put all low memory onto the freelists */
 
         * free the section of the memmap array.
         */
        if (pg < pgend)
-               free_bootmem(pg, pgend - pg);
+               memblock_free(pg, pgend - pg);
 }
 
 /*
 
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/percpu.h>
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
 
 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-       free_bootmem(__pa(ptr), size);
+       memblock_free(__pa(ptr), size);
 }
 
 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <asm/tce.h>
 #include <asm/calgary.h>
 #include <asm/proto.h>
        size = table_size_to_number_of_entries(specified_table_size);
        size *= TCE_ENTRY_SIZE;
 
-       free_bootmem(__pa(tbl), size);
+       memblock_free(__pa(tbl), size);
 }
 
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 static void __ref free_p2m_page(void *p)
 {
        if (unlikely(!slab_is_available())) {
-               free_bootmem((unsigned long)p, PAGE_SIZE);
+               memblock_free((unsigned long)p, PAGE_SIZE);
                return;
        }
 
 
 fail_db_node:
        of_node_put(smu->db_node);
 fail_bootmem:
-       free_bootmem(__pa(smu), sizeof(struct smu_device));
+       memblock_free(__pa(smu), sizeof(struct smu_device));
        smu = NULL;
 fail_np:
        of_node_put(np);
 
 #include <linux/pci_regs.h>
 #include <linux/pci_ids.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/io.h>
 #include <asm/pci-direct.h>
 #include <asm/fixmap.h>
        if (!seg)
                return;
 
-       free_bootmem(seg->dma, PAGE_SIZE);
+       memblock_free(seg->dma, PAGE_SIZE);
        ring->segment = NULL;
 }
 
                xdbc_free_ring(&xdbc.in_ring);
 
                if (xdbc.table_dma)
-                       free_bootmem(xdbc.table_dma, PAGE_SIZE);
+                       memblock_free(xdbc.table_dma, PAGE_SIZE);
 
                if (xdbc.out_dma)
-                       free_bootmem(xdbc.out_dma, PAGE_SIZE);
+                       memblock_free(xdbc.out_dma, PAGE_SIZE);
 
                xdbc.table_base = NULL;
                xdbc.out_buf = NULL;
        xdbc_free_ring(&xdbc.evt_ring);
        xdbc_free_ring(&xdbc.out_ring);
        xdbc_free_ring(&xdbc.in_ring);
-       free_bootmem(xdbc.table_dma, PAGE_SIZE);
-       free_bootmem(xdbc.out_dma, PAGE_SIZE);
+       memblock_free(xdbc.table_dma, PAGE_SIZE);
+       memblock_free(xdbc.out_dma, PAGE_SIZE);
        writel(0, &xdbc.xdbc_reg->control);
        early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
 
 
 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/dma-direct.h>
 #include <linux/export.h>
 #include <xen/swiotlb-xen.h>
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
-                       free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+                       memblock_free(__pa(xen_io_tlb_start),
+                                     PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
 
 extern void reset_node_managed_pages(pg_data_t *pgdat);
 extern void reset_all_zones_managed_pages(void);
 
-extern void free_bootmem_node(pg_data_t *pgdat,
-                             unsigned long addr,
-                             unsigned long size);
-extern void free_bootmem(unsigned long physaddr, unsigned long size);
 extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
 
 /* We are using top down, so it is safe to use 0 here */
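
The extern declarations removed here drop out of <linux/bootmem.h>; memblock_free()
itself is declared in <linux/memblock.h>, which is why the converted callers in the
hunks above each gain a #include <linux/memblock.h>. For orientation, a sketch of
the interface being substituted in, assuming the memblock prototype of this era
(the prototype itself is not part of this diff):

	/* Assumed declaration from <linux/memblock.h>: base is a physical
	 * address, which is why the call sites keep their __pa() and
	 * __pa_symbol() conversions unchanged.
	 */
	int memblock_free(phys_addr_t base, phys_addr_t size);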
 
 
        return pages;
 }
-
-/**
- * free_bootmem_node - mark a page range as usable
- * @pgdat: node the range resides on
- * @physaddr: starting physical address of the range
- * @size: size of the range in bytes
- *
- * Partial pages will be considered reserved and left as they are.
- *
- * The range must reside completely on the specified node.
- */
-void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
-                             unsigned long size)
-{
-       memblock_free(physaddr, size);
-}
-
-/**
- * free_bootmem - mark a page range as usable
- * @addr: starting physical address of the range
- * @size: size of the range in bytes
- *
- * Partial pages will be considered reserved and left as they are.
- *
- * The range must be contiguous but may span node boundaries.
- */
-void __init free_bootmem(unsigned long addr, unsigned long size)
-{
-       memblock_free(addr, size);
-}
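
Taken together, the conversion in every hunk above is mechanical: the removed
definitions show that free_bootmem() and free_bootmem_node() were one-line
wrappers around memblock_free(), so each caller keeps passing the same physical
address and size, and the pgdat argument of the _node variant is simply dropped
because the wrapper never used it. A minimal before/after sketch of the call-site
pattern, using hypothetical names (early_buf, len, nid) purely for illustration:

	/* Before this patch (hypothetical caller): hand an early allocation
	 * back through the bootmem wrappers.
	 *
	 *	free_bootmem(__pa(early_buf), PAGE_ALIGN(len));
	 *	free_bootmem_node(NODE_DATA(nid), __pa(early_buf),
	 *			  PAGE_ALIGN(len));
	 *
	 * After: the same physical range goes straight to memblock; the node
	 * argument disappears, and <linux/memblock.h> must now be included.
	 */
	memblock_free(__pa(early_buf), PAGE_ALIGN(len));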