OBJS           += string.o
 CFLAGS_string.o        := -Os
 
+ifeq ($(CONFIG_ARM_VIRT_EXT),y)
+OBJS           += hyp-stub.o
+endif
+
 #
 # Architecture dependencies
 #
 endif
 
 ccflags-y := -fpic -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all
+asflags-y := -Wa,-march=all -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
 KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
 
 $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
        @sed "$(SEDFLAGS)" < $< > $@
+
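+# The decompressor builds its own copy of the kernel's hyp-stub.S, copied
+# into the build directory by the rule below.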
+$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
+       $(call cmd,shipped)
 
  * published by the Free Software Foundation.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 /*
  * Debugging stuff
                .word   start                   @ absolute load/run zImage address
                .word   _edata                  @ zImage end address
  THUMB(                .thumb                  )
-1:             mov     r7, r1                  @ save architecture ID
+1:
+               mrs     r9, cpsr                @ preserve the boot CPSR (entry mode)
+#ifdef CONFIG_ARM_VIRT_EXT
+               bl      __hyp_stub_install      @ get into SVC mode, reversibly
+#endif
+               mov     r7, r1                  @ save architecture ID
                mov     r8, r2                  @ save atags pointer
 
 #ifndef __ARM_ARCH_2__
  ARM(          swi     0x123456        )       @ angel_SWI_ARM
  THUMB(                svc     0xab            )       @ angel_SWI_THUMB
 not_angel:
-               mrs     r2, cpsr                @ turn off interrupts to
-               orr     r2, r2, #0xc0           @ prevent angel from running
-               msr     cpsr_c, r2
+               safe_svcmode_maskall r0         @ switch to SVC mode, IRQs/FIQs masked
+               msr     spsr_cxsf, r9           @ Save the CPU boot mode in
+                                               @ SPSR
 #else
                teqp    pc, #0x0c000003         @ turn off interrupts
 #endif
                adr     r5, restart
                bic     r5, r5, #31
 
+/* Relocate the hyp vector base if necessary */
+#ifdef CONFIG_ARM_VIRT_EXT
+               mrs     r0, spsr
+               and     r0, r0, #MODE_MASK
+               cmp     r0, #HYP_MODE
+               bne     1f
+
+               bl      __hyp_get_vectors       @ r0 = current HVBAR
+               sub     r0, r0, r5              @ offset of the vectors in this image
+               add     r0, r0, r10             @ rebase onto the relocation destination
+               bl      __hyp_set_vectors
+1:
+#endif
+
                sub     r9, r6, r5              @ size to copy
                add     r9, r9, #31             @ rounded up to a multiple
                bic     r9, r9, #31             @ ... of 32 bytes
                bl      decompress_kernel
                bl      cache_clean_flush
                bl      cache_off
-               mov     r0, #0                  @ must be zero
                mov     r1, r7                  @ restore architecture number
                mov     r2, r8                  @ restore atags pointer
- ARM(          mov     pc, r4  )               @ call kernel
- THUMB(                bx      r4      )               @ entry point is always ARM
+
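+/*
+ * If the loader entered us in HYP mode, re-enter HYP before jumping to the
+ * kernel: install a private vector table and bounce through an HVC so the
+ * kernel proper starts in the mode it was originally given.
+ */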
+#ifdef CONFIG_ARM_VIRT_EXT
+               mrs     r0, spsr                @ Get saved CPU boot mode
+               and     r0, r0, #MODE_MASK
+               cmp     r0, #HYP_MODE           @ if not booted in HYP mode...
+               bne     __enter_kernel          @ boot kernel directly
+
+               adr     r12, .L__hyp_reentry_vectors_offset
+               ldr     r0, [r12]
+               add     r0, r0, r12
+
+               bl      __hyp_set_vectors
+               __HVC(0)                        @ otherwise bounce to hyp mode
+
+               b       .                       @ should never be reached
+
+               .align  2
+.L__hyp_reentry_vectors_offset:        .long   __hyp_reentry_vectors - .
+#else
+               b       __enter_kernel
+#endif
 
                .align  2
                .type   LC0, #object
 #endif
 
                .ltorg
+
+#ifdef CONFIG_ARM_VIRT_EXT
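+/*
+ * Vector table used only for the HVC bounce above; its hyp entry resumes
+ * execution at __enter_kernel, back in HYP mode.
+ */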
+.align 5
+__hyp_reentry_vectors:
+               W(b)    .                       @ reset
+               W(b)    .                       @ undef
+               W(b)    .                       @ svc
+               W(b)    .                       @ pabort
+               W(b)    .                       @ dabort
+               W(b)    __enter_kernel          @ hyp
+               W(b)    .                       @ irq
+               W(b)    .                       @ fiq
+#endif /* CONFIG_ARM_VIRT_EXT */
+
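+/*
+ * Final jump to the decompressed kernel: r0 must be zero, r1 carries the
+ * architecture ID, r2 the atags pointer, and r4 the kernel entry point.
+ */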
+__enter_kernel:
+               mov     r0, #0                  @ must be 0
+ ARM(          mov     pc, r4  )               @ call kernel
+ THUMB(                bx      r4      )               @ entry point is always ARM
+
 reloc_code_end:
 
                .align
 
 #include <asm/assembler.h>
 #include <asm/virt.h>
 
+#ifndef ZIMAGE
 /*
  * For the kernel proper, we need to find out the CPU boot mode long after
  * boot, so we need to store it in a writable variable.
        strne   r7, [r5, r6]            @ record what happened and give up
        .endm
 
+#else  /* ZIMAGE */
+
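+/*
+ * The zImage loader keeps the boot CPU mode in the SPSR (see the head.S
+ * changes), so there is nothing to record here.
+ */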
+       .macro  store_primary_cpu_mode  reg1:req, reg2:req, reg3:req
+       .endm
+
+/*
+ * The zImage loader only runs on one CPU, so we don't bother with multi-CPU
+ * consistency checking:
+ */
+       .macro  compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+       cmp     \mode, \mode            @ comparing a value with itself always sets "eq"
+       .endm
+
+#endif /* ZIMAGE */
+
 /*
  * Hypervisor stub installation functions.
  *
        bx      lr
 ENDPROC(__hyp_set_vectors)
 
+#ifndef ZIMAGE
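+/* __boot_cpu_mode is only defined in the kernel proper, not in the zImage stub. */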
 .align 2
 .L__boot_cpu_mode_offset:
        .long   __boot_cpu_mode - .
+#endif
 
 .align 5
 __hyp_stub_vectors: