u64 fpr[32];
        u64 fpscr;
 
+#ifdef CONFIG_SPE
+       ulong evr[32];          /* upper 32 bits of the guest's SPE GPRs */
+       ulong spefscr;          /* guest SPEFSCR */
+       ulong host_spefscr;     /* host SPEFSCR, restored on heavyweight exit */
+       u64 acc;                /* 64-bit SPE accumulator */
+#endif
 #ifdef CONFIG_ALTIVEC
        vector128 vr[32];
        vector128 vscr;
 
 #define ESR_ILK                0x00100000      /* Instr. Cache Locking */
 #define ESR_PUO                0x00040000      /* Unimplemented Operation exception */
 #define ESR_BO         0x00020000      /* Byte Ordering */
+#define ESR_SPV                0x00000080      /* Signal Processing operation */
 
 /* Bit definitions related to the DBCR0. */
 #if defined(CONFIG_40x)
 
        DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
 #endif
 
+#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
+       DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+       DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+       DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+       DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif
+
 #ifdef CONFIG_KVM_EXIT_TIMING
        DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
                                                arch.timing_exit.tv32.tbu));
 
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
        }
 }
 
+#ifdef CONFIG_SPE
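+/*
+ * While the guest runs with MSR[SPE] set, its SPE state lives in the
+ * real registers.  These helpers move that state between the hardware
+ * and the vcpu struct; preemption is disabled so the transfer cannot
+ * be interrupted, and enable_kernel_spe() lets the kernel safely use
+ * the SPE unit for the transfer.
+ */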
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+       preempt_disable();
+       enable_kernel_spe();
+       kvmppc_save_guest_spe(vcpu);
+       vcpu->arch.shadow_msr &= ~MSR_SPE;
+       preempt_enable();
+}
+
+static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+       preempt_disable();
+       enable_kernel_spe();
+       kvmppc_load_guest_spe(vcpu);
+       vcpu->arch.shadow_msr |= MSR_SPE;
+       preempt_enable();
+}
+
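+/* Keep the real (shadow) MSR[SPE] in sync with the guest-visible MSR[SPE]. */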
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.shared->msr & MSR_SPE) {
+               if (!(vcpu->arch.shadow_msr & MSR_SPE))
+                       kvmppc_vcpu_enable_spe(vcpu);
+       } else if (vcpu->arch.shadow_msr & MSR_SPE) {
+               kvmppc_vcpu_disable_spe(vcpu);
+       }
+}
+#else
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+       if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
+               kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+       vcpu->arch.shared->msr = new_msr;
+
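+       /* MSR[WE] means the guest is idle: block until an interrupt arrives. */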
+       if (vcpu->arch.shared->msr & MSR_WE) {
+               kvm_vcpu_block(vcpu);
+               kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+       }
+
+       kvmppc_vcpu_sync_spe(vcpu);
+}
+
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
 {
                r = RESUME_GUEST;
                break;
 
-       case BOOKE_INTERRUPT_SPE_UNAVAIL:
-               kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+       case BOOKE_INTERRUPT_SPE_UNAVAIL:
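+               /*
+                * The guest's MSR allows SPE, so this interrupt just
+                * means its SPE state isn't loaded yet; load it now
+                * instead of reflecting the interrupt.
+                */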
+               if (vcpu->arch.shared->msr & MSR_SPE)
+                       kvmppc_vcpu_enable_spe(vcpu);
+               else
+                       kvmppc_booke_queue_irqprio(vcpu,
+                                                  BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;
 
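+       /*
+        * With CONFIG_SPE these can only occur while the real MSR[SPE]
+        * is set, so they are genuine guest exceptions; reflect them.
+        */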
        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
+#else
+       case BOOKE_INTERRUPT_SPE_UNAVAIL:
+               /*
+                * Guest wants SPE, but host kernel doesn't support it.  Send
+                * an "unimplemented operation" program check to the guest.
+                */
+               kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+               r = RESUME_GUEST;
+               break;
+
+       /*
+        * These really should never happen without CONFIG_SPE,
+        * as the real MSR[SPE] should never be enabled for the
+        * guest in that case.
+        */
+       case BOOKE_INTERRUPT_SPE_FP_DATA:
+       case BOOKE_INTERRUPT_SPE_FP_ROUND:
+               printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+                      __func__, exit_nr, vcpu->arch.pc);
+               run->hw.hardware_exit_reason = exit_nr;
+               r = RESUME_HOST;
+               break;
+#endif
 
        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
 
 
 extern unsigned long kvmppc_booke_handlers;
 
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
-       if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
-               kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
-       vcpu->arch.shared->msr = new_msr;
-
-       if (vcpu->arch.shared->msr & MSR_WE) {
-               kvm_vcpu_block(vcpu);
-               kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
-       };
-}
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
 int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
 
+/* low-level asm code to transfer guest SPE register state */
+void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
+void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
+
+/* high-level function: manages shadow MSR flags and host SPE state */
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+
 #endif /* __KVM_BOOKE_H__ */
 
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  */
 heavyweight_exit:
        /* Not returning to guest. */
 
+#ifdef CONFIG_SPE
+       /* save guest SPEFSCR and load host SPEFSCR */
+       mfspr   r9, SPRN_SPEFSCR
+       stw     r9, VCPU_SPEFSCR(r4)
+       lwz     r9, VCPU_HOST_SPEFSCR(r4)
+       mtspr   SPRN_SPEFSCR, r9
+#endif
+
        /* We already saved guest volatile register state; now save the
         * non-volatiles. */
        stw     r15, VCPU_GPR(r15)(r4)
        lwz     r30, VCPU_GPR(r30)(r4)
        lwz     r31, VCPU_GPR(r31)(r4)
 
+#ifdef CONFIG_SPE
+       /* save host SPEFSCR and load guest SPEFSCR */
+       mfspr   r3, SPRN_SPEFSCR
+       stw     r3, VCPU_HOST_SPEFSCR(r4)
+       lwz     r3, VCPU_SPEFSCR(r4)
+       mtspr   SPRN_SPEFSCR, r3
+#endif
+
 lightweight_exit:
        stw     r2, HOST_R2(r1)
 
        lwz     r3, VCPU_GPR(r3)(r4)
        lwz     r4, VCPU_GPR(r4)(r4)
        rfi
+
+#ifdef CONFIG_SPE
+_GLOBAL(kvmppc_save_guest_spe)
+       cmpi    0,r3,0
+       beqlr-
+       SAVE_32EVRS(0, r4, r3, VCPU_EVR)
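+       /* ACC has no mfspr; with evr6 zeroed, the multiply-accumulate
+        * yields evr6 = ACC + 0 * 0 = ACC.  This clobbers evr6, which
+        * is why the EVRs were saved first. */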
+       evxor   evr6, evr6, evr6
+       evmwumiaa evr6, evr6, evr6
+       li      r4,VCPU_ACC
+       evstddx evr6, r4, r3            /* save acc */
+       blr
+
+_GLOBAL(kvmppc_load_guest_spe)
+       cmpi    0,r3,0
+       beqlr-
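+       /* Restore ACC first, using evr6 as scratch: evmra copies its
+        * operand into the accumulator.  REST_32EVRS then reloads the
+        * real evr6. */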
+       li      r4,VCPU_ACC
+       evlddx  evr6,r4,r3
+       evmra   evr6,evr6               /* load acc */
+       REST_32EVRS(0, r4, r3, VCPU_EVR)
+       blr
+#endif
 
 /*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, <yu.liu@freescale.com>
  *
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
        kvmppc_e500_tlb_put(vcpu);
+
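+       /*
+        * The vcpu is being descheduled; if guest SPE state is live in
+        * the hardware, flush it back to the vcpu struct now.
+        */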
+#ifdef CONFIG_SPE
+       if (vcpu->arch.shadow_msr & MSR_SPE)
+               kvmppc_vcpu_disable_spe(vcpu);
+#endif
 }
 
 int kvmppc_core_check_processor_compat(void)