bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
 
        }
 }
 
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       ulong mp_pa = vcpu->arch.magic_page_pa;
+
+       /* Magic page override */
+       if (unlikely(mp_pa) &&
+           unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
+                    ((mp_pa & PAGE_MASK) & KVM_PAM))) {
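+               /* the shared page backs the magic page, so use its kernel VA */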
+               ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+               pfn_t pfn;
+
+               pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
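+               /* take a reference so the caller's release path stays balanced */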
+               get_page(pfn_to_page(pfn));
+               return pfn;
+       }
+
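+       /* everything else goes through the regular memslot lookup */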
+       return gfn_to_pfn(vcpu->kvm, gfn);
+}
+
 /* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To
  * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
  * emulate the 32-byte dcbz length.
 
 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
+       ulong mp_pa = vcpu->arch.magic_page_pa;
+
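+       /* the magic page is always visible to the guest */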
+       if (unlikely(mp_pa) &&
+           unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+               return 1;
+       }
+
        return kvm_is_visible_gfn(vcpu->kvm, gfn);
 }
 
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
+       unsigned long p;
 
        vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
        if (err)
                goto free_shadow_vcpu;
 
-       vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-       if (!vcpu->arch.shared)
+       p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+       if (!p)
                goto uninit_vcpu;
+
+       /* the real shared page fills the last 4k of our page */
+       vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
 
        vcpu->arch.host_retip = kvm_return_point;
 {
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
-       free_page((unsigned long)vcpu->arch.shared);
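+       /* shared points into the page; mask back down to the page base */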
+       free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu_book3s->shadow_vcpu);
        vfree(vcpu_book3s);
 
                                      struct kvmppc_pte *pte, bool data)
 {
        int r;
+       ulong mp_ea = vcpu->arch.magic_page_ea;
 
        pte->eaddr = eaddr;
+
+       /* Magic page override */
+       if (unlikely(mp_ea) &&
+           unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+           !(vcpu->arch.shared->msr & MSR_PR)) {
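+               /* the magic page is only mapped while the guest is in supervisor mode */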
+               pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
+               pte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
+               pte->raddr &= KVM_PAM;
+               pte->may_execute = true;
+               pte->may_read = true;
+               pte->may_write = true;
+
+               return 0;
+       }
+
        r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
        if (r < 0)
               r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
 
        struct hpte_cache *pte;
 
        /* Get host physical address for gpa */
-       hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+       hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
-       if (kvm_is_error_hva(hpaddr)) {
-               printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
-                                orig_pte->eaddr);
+       if (is_error_pfn(hpaddr)) {
+               printk(KERN_INFO "Couldn't get guest page for ea %lx!\n",
+                      orig_pte->eaddr);
 
        bool found = false;
        bool perm_err = false;
        int second = 0;
+       ulong mp_ea = vcpu->arch.magic_page_ea;
+
+       /* Magic page override */
+       if (unlikely(mp_ea) &&
+           unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+           !(vcpu->arch.shared->msr & MSR_PR)) {
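+               /* skip the SLB walk entirely; the magic mapping is fixed */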
+               gpte->eaddr = eaddr;
+               gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
+               gpte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
+               gpte->raddr &= KVM_PAM;
+               gpte->may_execute = true;
+               gpte->may_read = true;
+               gpte->may_write = true;
+
+               return 0;
+       }
 
        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
        if (!slbe)
        ulong ea = esid << SID_SHIFT;
        struct kvmppc_slb *slb;
        u64 gvsid = esid;
+       ulong mp_ea = vcpu->arch.magic_page_ea;
 
        if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
                slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
                break;
        case MSR_DR|MSR_IR:
                if (!slb)
-                       return -ENOENT;
+                       goto no_slb;
 
                *vsid = gvsid;
                break;
                *vsid |= VSID_PR;
 
        return 0;
+
+no_slb:
+       /* Catch magic page case */
+       if (unlikely(mp_ea) &&
+           unlikely(esid == (mp_ea >> SID_SHIFT)) &&
+           !(vcpu->arch.shared->msr & MSR_PR)) {
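+               /* hand back a real-mode VSID for the magic page's segment */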
+               *vsid = VSID_REAL | esid;
+               return 0;
+       }
+
+       return -EINVAL;
 }
 
 static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
 
        struct kvmppc_sid_map *map;
 
        /* Get host physical address for gpa */
-       hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+       hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
-       if (kvm_is_error_hva(hpaddr)) {
-               printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
+       if (is_error_pfn(hpaddr)) {
+               printk(KERN_INFO "Couldn't get guest page for ea %lx!\n", orig_pte->eaddr);
                return -EINVAL;
        }
        hpaddr <<= PAGE_SHIFT;
-#if PAGE_SHIFT == 12
-#elif PAGE_SHIFT == 16
-       hpaddr |= orig_pte->raddr & 0xf000;
-#else
-#error Unknown page size
-#endif
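+       /* keep the 4k sub-page offset when host pages are larger than 4k */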
+       hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
 
        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);