memblock_reserve(__pa_symbol(&_text),
                         __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
-#ifdef CONFIG_BLK_DEV_INITRD
-       /* Reserve INITRD */
-       if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
-               /* Assume only end is not page aligned */
-               u64 ramdisk_image = boot_params.hdr.ramdisk_image;
-               u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-               u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-               memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
-       }
-#endif
-
        /* Call the subarch specific early setup function */
        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_MRST:
 
        memblock_reserve(__pa_symbol(&_text),
                         __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
-#ifdef CONFIG_BLK_DEV_INITRD
-       /* Reserve INITRD */
-       if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
-               /* Assume only end is not page aligned */
-               unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
-               unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
-               unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-               memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
-       }
-#endif
-
        reserve_ebda_region();
 
        /*
 
 
        return mapped_pages << PAGE_SHIFT;
 }
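+
+/*
+ * Common replacement for the two open-coded CONFIG_BLK_DEV_INITRD blocks
+ * removed above: reserve the boot loader provided initrd with memblock so
+ * that later early allocations cannot hand out that range.  reserve_initrd()
+ * below still sets up initrd_start/initrd_end and relocates the image if
+ * it is not covered by the direct mapping.
+ */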
+static void __init early_reserve_initrd(void)
+{
+       /* Assume only end is not page aligned */
+       u64 ramdisk_image = boot_params.hdr.ramdisk_image;
+       u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
+       u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
+
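+       /*
+        * A zero loader type, image address or size means the boot loader
+        * did not pass an initrd; nothing to reserve in that case.
+        */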
+       if (!boot_params.hdr.type_of_loader ||
+           !ramdisk_image || !ramdisk_size)
+               return;         /* No initrd provided by bootloader */
+
+       memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
+}
+
 static void __init reserve_initrd(void)
 {
        /* Assume only end is not page aligned */
        if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
                                PFN_DOWN(ramdisk_end))) {
                /* All are mapped, easy case */
-               /*
-                * don't need to reserve again, already reserved early
-                * in i386_start_kernel
-                */
                initrd_start = ramdisk_image + PAGE_OFFSET;
                initrd_end = initrd_start + ramdisk_size;
                return;
        memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 }
 #else
+static void __init early_reserve_initrd(void)
+{
+}
 static void __init reserve_initrd(void)
 {
 }
 
 void __init setup_arch(char **cmdline_p)
 {
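+       /*
+        * The initrd reservation that used to be open-coded in the early
+        * startup paths now happens here, first thing in setup_arch().
+        */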
+       early_reserve_initrd();
+
 #ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
        visws_early_detect();