struct kvmppc_vcpu_book3s {
        struct kvm_vcpu vcpu;
-       struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+       struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
        struct kvmppc_sid_map sid_map[SID_MAP_NUM];
        struct kvmppc_slb slb[64];
        struct {
 }
 
 extern void kvm_return_point(void);
+static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+
+/* Write guest GPR 'num'.  GPRs 0-13 are held in the shadow vcpu, and the
+ * heap-allocated copy behind to_book3s()->shadow_vcpu is written as well so
+ * the two stay in sync -- NOTE(review): presumably because on 64-bit the
+ * live copy sits in the paca and is memcpy'd back and forth in
+ * vcpu_load/vcpu_put; confirm against those paths.  GPRs 14 and up live
+ * only in vcpu->arch.gpr[]. */
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+       if ( num < 14 ) {
+               to_svcpu(vcpu)->gpr[num] = val;
+               to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
+       } else
+               vcpu->arch.gpr[num] = val;
+}
+
+/* Read guest GPR 'num': GPRs 0-13 come from the shadow vcpu, the rest
+ * from vcpu->arch.gpr[].  Mirrors the split in kvmppc_set_gpr(). */
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+       if ( num < 14 )
+               return to_svcpu(vcpu)->gpr[num];
+       else
+               return vcpu->arch.gpr[num];
+}
+
+/* Write the guest CR.  Like GPRs 0-13, CR is kept both in the shadow vcpu
+ * and in the book3s heap copy, so both are updated. */
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+       to_svcpu(vcpu)->cr = val;
+       to_book3s(vcpu)->shadow_vcpu->cr = val;
+}
+
+/* Read the guest CR from the shadow vcpu. */
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+       return to_svcpu(vcpu)->cr;
+}
+
+/* Write the guest XER; updated in both the shadow vcpu and the book3s
+ * heap copy, same dual-write pattern as CR and GPRs 0-13. */
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+       to_svcpu(vcpu)->xer = val;
+       to_book3s(vcpu)->shadow_vcpu->xer = val;
+}
+
+/* Read the guest XER from the shadow vcpu. */
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+       return to_svcpu(vcpu)->xer;
+}
+
+/* Write the guest CTR.  Unlike CR/XER/GPR0-13, only the shadow vcpu copy
+ * is written -- no mirror into to_book3s()->shadow_vcpu. */
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+       to_svcpu(vcpu)->ctr = val;
+}
+
+/* Read the guest CTR from the shadow vcpu. */
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+       return to_svcpu(vcpu)->ctr;
+}
+
+/* Write the guest LR (shadow vcpu copy only). */
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+       to_svcpu(vcpu)->lr = val;
+}
+
+/* Read the guest LR from the shadow vcpu. */
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+       return to_svcpu(vcpu)->lr;
+}
+
+/* Write the guest PC (shadow vcpu copy only). */
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+       to_svcpu(vcpu)->pc = val;
+}
+
+/* Read the guest PC from the shadow vcpu. */
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+       return to_svcpu(vcpu)->pc;
+}
+
+/* Return the last guest instruction word.  If the exit path could not
+ * fetch it (marker KVM_INST_FETCH_FAILED), re-read it from the guest at
+ * the current PC via kvmppc_ld().
+ * NOTE(review): the kvmppc_ld() return value is ignored here; if the
+ * manual load also fails, last_inst may still hold the FETCH_FAILED
+ * marker -- callers appear to tolerate that; confirm. */
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+       ulong pc = kvmppc_get_pc(vcpu);
+       struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+
+       /* Load the instruction manually if it failed to do so in the
+        * exit path */
+       if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+               kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+       return svcpu->last_inst;
+}
+
+/* Read the faulting data address (DAR) recorded in the shadow vcpu by
+ * the exit path. */
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+       return to_svcpu(vcpu)->fault_dar;
+}
 
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 
 #define INS_DCBZ                       0x7c0007ec
 
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#include <asm/kvm_book3s_32.h>
+#else
+#include <asm/kvm_book3s_64.h>
+#endif
+
 #endif /* __ASM_KVM_BOOK3S_H__ */
 
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-       memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
-       memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+       /* 64-bit: copy this vcpu's shadow SLB and shadow vcpu state into the
+        * live per-CPU copy (via to_svcpu()/get_paca()).
+        * NOTE(review): shadow_vcpu became a pointer in this change, hence
+        * the dropped '&' on the memcpy source. */
+       memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+       memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
              sizeof(get_paca()->shadow_vcpu));
-       get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
+       to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+       /* 32-bit: no paca copy; the current thread just points at this
+        * vcpu's heap-allocated shadow vcpu. */
+       current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+#endif
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-       memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
-       memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+       memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+       memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
               sizeof(get_paca()->shadow_vcpu));
-       to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+       to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+#endif
 
        kvmppc_giveup_ext(vcpu, MSR_FP);
        kvmppc_giveup_ext(vcpu, MSR_VEC);
                                              VSID_SPLIT_MASK);
 
                kvmppc_mmu_flush_segments(vcpu);
-               kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+               kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
        }
 
        /* Preload FPU if it's enabled */
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-       vcpu->arch.srr0 = vcpu->arch.pc;
+       /* Save return state in SRR0/SRR1, redirect the guest PC to the
+        * interrupt vector (relative to HIOR), then let the MMU callback
+        * rewrite MSR.  PC now lives in the shadow vcpu, so this must go
+        * through the kvmppc_get_pc/kvmppc_set_pc accessors. */
+       vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
        vcpu->arch.srr1 = vcpu->arch.msr | flags;
-       vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
+       kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
        vcpu->arch.mmu.reset_msr(vcpu);
 }
 
 
        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
-               vcpu->arch.dear = vcpu->arch.fault_dear;
-               to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-               vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+               vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+               to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
+               vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
-               vcpu->arch.dear = vcpu->arch.fault_dear;
-               to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
+               vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+               to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
                to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-               vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+               vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
-               vcpu->arch.dear = vcpu->arch.fault_dear;
+               vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
 
 static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 {
-       ulong srr0 = vcpu->arch.pc;
+       ulong srr0 = kvmppc_get_pc(vcpu);
+       u32 last_inst = kvmppc_get_last_inst(vcpu);
        int ret;
 
-       ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &vcpu->arch.last_inst, false);
+       ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
                vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
                vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
        run->ready_for_interrupt_injection = 1;
 #ifdef EXIT_DEBUG
        printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
-               exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
-               kvmppc_get_dec(vcpu), vcpu->arch.msr);
+               exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
+               kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
 #elif defined (EXIT_DEBUG_SIMPLE)
        if ((exit_nr != 0x900) && (exit_nr != 0x500))
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
-                       exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
+                       exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
                        vcpu->arch.msr);
 #endif
        kvm_resched(vcpu);
        case BOOK3S_INTERRUPT_INST_STORAGE:
                vcpu->stat.pf_instruc++;
                /* only care about PTEG not found errors, but leave NX alone */
-               if (vcpu->arch.shadow_srr1 & 0x40000000) {
-                       r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
+               if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+                       r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                         *     so we can't use the NX bit inside the guest. Let's cross our fingers,
                         *     that no guest that needs the dcbz hack does NX.
                         */
-                       kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+                       kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
                        r = RESUME_GUEST;
                } else {
-                       vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
+                       vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-                       kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+                       kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
                        r = RESUME_GUEST;
                }
                break;
        case BOOK3S_INTERRUPT_DATA_STORAGE:
+       {
+               ulong dar = kvmppc_get_fault_dar(vcpu);
                vcpu->stat.pf_storage++;
                /* The only case we need to handle is missing shadow PTEs */
-               if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
-                       r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
+               if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+                       r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                } else {
-                       vcpu->arch.dear = vcpu->arch.fault_dear;
-                       to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
+                       vcpu->arch.dear = dar;
+                       to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
                        r = RESUME_GUEST;
                }
                break;
+       }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
-               if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
-                       vcpu->arch.dear = vcpu->arch.fault_dear;
+               if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
+                       vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
-               if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
+               if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                ulong flags;
 
 program_interrupt:
-               flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
+               flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
                if (vcpu->arch.msr & MSR_PR) {
 #ifdef EXIT_DEBUG
-                       printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
+                       printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
-                       if ((vcpu->arch.last_inst & 0xff0007ff) !=
+                       if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-                              __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+                              __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
                        to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
-                               vcpu->arch.last_inst);
+                               kvmppc_get_last_inst(vcpu));
                        vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
-                               vcpu->arch.last_inst);
+                               kvmppc_get_last_inst(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
        default:
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
-                       exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
+                       exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
 
 #ifdef EXIT_DEBUG
-       printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
+       printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
 #endif
 
        return r;
 
        vcpu_load(vcpu);
 
-       regs->pc = vcpu->arch.pc;
+       regs->pc = kvmppc_get_pc(vcpu);
        regs->cr = kvmppc_get_cr(vcpu);
-       regs->ctr = vcpu->arch.ctr;
-       regs->lr = vcpu->arch.lr;
+       regs->ctr = kvmppc_get_ctr(vcpu);
+       regs->lr = kvmppc_get_lr(vcpu);
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.msr;
        regs->srr0 = vcpu->arch.srr0;
 
        vcpu_load(vcpu);
 
-       vcpu->arch.pc = regs->pc;
+       kvmppc_set_pc(vcpu, regs->pc);
        kvmppc_set_cr(vcpu, regs->cr);
-       vcpu->arch.ctr = regs->ctr;
-       vcpu->arch.lr = regs->lr;
+       kvmppc_set_ctr(vcpu, regs->ctr);
+       kvmppc_set_lr(vcpu, regs->lr);
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.srr0 = regs->srr0;
 {
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
-       int err;
+       int err = -ENOMEM;
 
        vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
-       if (!vcpu_book3s) {
-               err = -ENOMEM;
+       if (!vcpu_book3s)
                goto out;
-       }
+
        memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));
 
+       vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+               kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+       if (!vcpu_book3s->shadow_vcpu)
+               goto free_vcpu;
+
        vcpu = &vcpu_book3s->vcpu;
        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
-               goto free_vcpu;
+               goto free_shadow_vcpu;
 
        vcpu->arch.host_retip = kvm_return_point;
        vcpu->arch.host_msr = mfmsr();
 
        err = __init_new_context();
        if (err < 0)
-               goto free_vcpu;
+               goto free_shadow_vcpu;
        vcpu_book3s->context_id = err;
 
        vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
 
        return vcpu;
 
+free_shadow_vcpu:
+       kfree(vcpu_book3s->shadow_vcpu);
 free_vcpu:
        vfree(vcpu_book3s);
 out:
 
        __destroy_context(vcpu_book3s->context_id);
        kvm_vcpu_uninit(vcpu);
+       kfree(vcpu_book3s->shadow_vcpu);
        vfree(vcpu_book3s);
 }