        u64 pfn;
        ulong slot;
        struct kvmppc_pte pte;
+       int pagesize;   /* host page size: MMU_PAGE_4K or MMU_PAGE_64K */
 };
 
 struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST          1
 #define CONTEXT_GUEST_END      2
 
-#define VSID_REAL      0x0fffffffffc00000ULL
-#define VSID_BAT       0x0fffffffffb00000ULL
+#define VSID_REAL      0x07ffffffffc00000ULL
+#define VSID_BAT       0x07ffffffffb00000ULL
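+/*
+ * VSID_64K flags a shadow segment that may be mapped with 64k host pages;
+ * VSID_REAL and VSID_BAT are shrunk by one bit so this flag does not
+ * overlap them.
+ */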
+#define VSID_64K       0x0800000000000000ULL
 #define VSID_1T                0x1000000000000000ULL
 #define VSID_REAL_DR   0x2000000000000000ULL
 #define VSID_REAL_IR   0x4000000000000000ULL
 
        kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
 }
 
+#ifdef CONFIG_PPC_64K_PAGES
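+/*
+ * Returns true if the KVM magic (paravirt) page lies in the segment
+ * identified by @esid while the guest runs in privileged mode; such a
+ * segment is never marked for 64k pages below.
+ */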
+static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
+{
+       ulong mp_ea = vcpu->arch.magic_page_ea;
+
+       return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
+               (mp_ea >> SID_SHIFT) == esid;
+}
+#endif
+
 static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
 {
        struct kvmppc_slb *slb;
        u64 gvsid = esid;
        ulong mp_ea = vcpu->arch.magic_page_ea;
+       int pagesize = MMU_PAGE_64K;    /* default if no guest SLB entry applies */
 
        if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
                slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
                if (slb) {
                        gvsid = slb->vsid;
+                       pagesize = slb->base_page_size;
                        if (slb->tb) {
                                gvsid <<= SID_SHIFT_1T - SID_SHIFT;
                                gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
 
        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
-               *vsid = VSID_REAL | esid;
+               gvsid = VSID_REAL | esid;
                break;
        case MSR_IR:
-               *vsid = VSID_REAL_IR | gvsid;
+               gvsid |= VSID_REAL_IR;
                break;
        case MSR_DR:
-               *vsid = VSID_REAL_DR | gvsid;
+               gvsid |= VSID_REAL_DR;
                break;
        case MSR_DR|MSR_IR:
                if (!slb)
                        goto no_slb;
 
-               *vsid = gvsid;
                break;
        default:
                BUG();
                break;
        }
 
+#ifdef CONFIG_PPC_64K_PAGES
+       /*
+        * Mark this as a 64k segment if the host is using
+        * 64k pages, the host MMU supports 64k pages and
+        * the guest segment page size is >= 64k,
+        * but not if this segment contains the magic page.
+        */
+       if (pagesize >= MMU_PAGE_64K &&
+           mmu_psize_defs[MMU_PAGE_64K].shift &&
+           !segment_contains_magic_page(vcpu, esid))
+               gvsid |= VSID_64K;
+#endif
+
        if (vcpu->arch.shared->msr & MSR_PR)
-               *vsid |= VSID_PR;
+               gvsid |= VSID_PR;
 
+       *vsid = gvsid;
        return 0;
 
 no_slb:
 
 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
        ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
-                              MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M,
+                              pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
                               false);
 }
 
        int attempt = 0;
        struct kvmppc_sid_map *map;
        int r = 0;
+       int hpsize = MMU_PAGE_4K;       /* host page size for this HPTE */
 
        /* Get host physical address for gpa */
        hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
                goto out;
        }
        hpaddr <<= PAGE_SHIFT;
-       hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
 
        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
                goto out;
        }
 
-       vsid = map->host_vsid;
-       vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+       vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
 
        if (!orig_pte->may_write)
                rflags |= HPTE_R_PP;
        else
                kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
 
-       hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
+       /*
+        * Use 64K pages if possible; otherwise, on 64K page kernels,
+        * we need to transfer 4 more bits from guest real to host real addr.
+        */
+       if (vsid & VSID_64K)
+               hpsize = MMU_PAGE_64K;
+       else
+               hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+
+       hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);
 
 map_again:
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
                }
 
        ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
-                                MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
+                                hpsize, hpsize, MMU_SEGSIZE_256M);
 
        if (ret < 0) {
                /* If we couldn't map a primary PTE, try a secondary */
                pte->host_vpn = vpn;
                pte->pte = *orig_pte;
                pte->pfn = hpaddr >> PAGE_SHIFT;
+               pte->pagesize = hpsize;
 
                kvmppc_mmu_hpte_cache_map(vcpu, pte);
        }
        slb_vsid &= ~SLB_VSID_KP;
        slb_esid |= slb_index;
 
+#ifdef CONFIG_PPC_64K_PAGES
+       /* Set host segment base page size to 64K if possible */
+       if (gvsid & VSID_64K)
+               slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
+#endif
+
        svcpu->slb[slb_index].esid = slb_esid;
        svcpu->slb[slb_index].vsid = slb_vsid;