order = PPC_MIN_HPT_ORDER;
        }
 
-       kvm->arch.hpt_cma_alloc = 0;
+       kvm->arch.hpt.cma = 0;
        page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
        if (page) {
                hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
                memset((void *)hpt, 0, (1ul << order));
-               kvm->arch.hpt_cma_alloc = 1;
+               kvm->arch.hpt.cma = 1;
        }
 
        /* Lastly try successively smaller sizes from the page allocator */
        if (!hpt)
                return -ENOMEM;
 
-       kvm->arch.hpt_virt = hpt;
-       kvm->arch.hpt_order = order;
+       kvm->arch.hpt.virt = hpt;
+       kvm->arch.hpt.order = order;
        /* HPTEs are 2**4 bytes long */
-       kvm->arch.hpt_npte = 1ul << (order - 4);
+       kvm->arch.hpt.npte = 1ul << (order - 4);
        /* 128 (2**7) bytes in each HPTEG */
-       kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
+       kvm->arch.hpt.mask = (1ul << (order - 7)) - 1;
 
        atomic64_set(&kvm->arch.mmio_update, 0);
 
        /* Allocate reverse map array */
-       rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
+       rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt.npte);
        if (!rev) {
                pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
                goto out_freehpt;
        }
-       kvm->arch.revmap = rev;
+       kvm->arch.hpt.rev = rev;
        kvm->arch.sdr1 = __pa(hpt) | (order - 18);
 
        pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
        return 0;
 
  out_freehpt:
-       if (kvm->arch.hpt_cma_alloc)
+       if (kvm->arch.hpt.cma)
                kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
        else
                free_pages(hpt, order - PAGE_SHIFT);
                        goto out;
                }
        }
-       if (kvm->arch.hpt_virt) {
-               order = kvm->arch.hpt_order;
+       if (kvm->arch.hpt.virt) {
+               order = kvm->arch.hpt.order;
                /* Set the entire HPT to 0, i.e. invalid HPTEs */
-               memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
+               memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
                /*
                 * Reset all the reverse-mapping chains for all memslots
                 */
 
 void kvmppc_free_hpt(struct kvm *kvm)
 {
-       vfree(kvm->arch.revmap);
-       if (kvm->arch.hpt_cma_alloc)
-               kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt_virt),
-                                1 << (kvm->arch.hpt_order - PAGE_SHIFT));
-       else if (kvm->arch.hpt_virt)
-               free_pages(kvm->arch.hpt_virt,
-                          kvm->arch.hpt_order - PAGE_SHIFT);
+       /* Release the vmalloc'ed reverse-map array (vfree(NULL) is a no-op) */
+       vfree(kvm->arch.hpt.rev);
+       /*
+        * The HPT itself was obtained either from the CMA pool or from the
+        * page allocator; the hpt.cma flag records which, so it must be
+        * returned through the matching free routine.
+        */
+       if (kvm->arch.hpt.cma)
+               kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt.virt),
+                                1 << (kvm->arch.hpt.order - PAGE_SHIFT));
+       else if (kvm->arch.hpt.virt)
+               free_pages(kvm->arch.hpt.virt,
+                          kvm->arch.hpt.order - PAGE_SHIFT);
 }
 
 /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
        if (npages > 1ul << (40 - porder))
                npages = 1ul << (40 - porder);
        /* Can't use more than 1 HPTE per HPTEG */
-       if (npages > kvm->arch.hpt_mask + 1)
-               npages = kvm->arch.hpt_mask + 1;
+       if (npages > kvm->arch.hpt.mask + 1)
+               npages = kvm->arch.hpt.mask + 1;
 
        hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
                HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
        for (i = 0; i < npages; ++i) {
                addr = i << porder;
                /* can't use hpt_hash since va > 64 bits */
-               hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
+               hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt.mask;
                /*
                 * We assume that the hash table is empty and no
                 * vcpus are using it at this stage.  Since we create
                preempt_enable();
                return -ENOENT;
        }
-       hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+       hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
        v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
-       gr = kvm->arch.revmap[index].guest_rpte;
+       gr = kvm->arch.hpt.rev[index].guest_rpte;
 
        unlock_hpte(hptep, orig_v);
        preempt_enable();
                }
        }
        index = vcpu->arch.pgfault_index;
-       hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
-       rev = &kvm->arch.revmap[index];
+       hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
+       rev = &kvm->arch.hpt.rev[index];
        preempt_disable();
        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
 static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
                           unsigned long gfn)
 {
-       struct revmap_entry *rev = kvm->arch.revmap;
+       struct revmap_entry *rev = kvm->arch.hpt.rev;
        unsigned long h, i, j;
        __be64 *hptep;
        unsigned long ptel, psize, rcbits;
                 * rmap chain lock.
                 */
                i = *rmapp & KVMPPC_RMAP_INDEX;
-               hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+               hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        unlock_rmap(rmapp);
 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
                         unsigned long gfn)
 {
-       struct revmap_entry *rev = kvm->arch.revmap;
+       struct revmap_entry *rev = kvm->arch.hpt.rev;
        unsigned long head, i, j;
        __be64 *hptep;
        int ret = 0;
 
        i = head = *rmapp & KVMPPC_RMAP_INDEX;
        do {
-               hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+               hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
                j = rev[i].forw;
 
                /* If this HPTE isn't referenced, ignore it */
 static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
                              unsigned long gfn)
 {
-       struct revmap_entry *rev = kvm->arch.revmap;
+       struct revmap_entry *rev = kvm->arch.hpt.rev;
        unsigned long head, i, j;
        unsigned long *hp;
        int ret = 1;
        if (*rmapp & KVMPPC_RMAP_PRESENT) {
                i = head = *rmapp & KVMPPC_RMAP_INDEX;
                do {
-                       hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
+                       hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
                        j = rev[i].forw;
                        if (be64_to_cpu(hp[1]) & HPTE_R_R)
                                goto out;
  */
 static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 {
-       struct revmap_entry *rev = kvm->arch.revmap;
+       struct revmap_entry *rev = kvm->arch.hpt.rev;
        unsigned long head, i, j;
        unsigned long n;
        unsigned long v, r;
        i = head = *rmapp & KVMPPC_RMAP_INDEX;
        do {
                unsigned long hptep1;
-               hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+               hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
                j = rev[i].forw;
 
                /*
        flags = ctx->flags;
 
        i = ctx->index;
-       hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
-       revp = kvm->arch.revmap + i;
+       hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
+       revp = kvm->arch.hpt.rev + i;
        lbuf = (unsigned long __user *)buf;
 
        nb = 0;
 
                /* Skip uninteresting entries, i.e. clean on not-first pass */
                if (!first_pass) {
-                       while (i < kvm->arch.hpt_npte &&
+                       while (i < kvm->arch.hpt.npte &&
                               !hpte_dirty(revp, hptp)) {
                                ++i;
                                hptp += 2;
                hdr.index = i;
 
                /* Grab a series of valid entries */
-               while (i < kvm->arch.hpt_npte &&
+               while (i < kvm->arch.hpt.npte &&
                       hdr.n_valid < 0xffff &&
                       nb + HPTE_SIZE < count &&
                       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
                        ++revp;
                }
                /* Now skip invalid entries while we can */
-               while (i < kvm->arch.hpt_npte &&
+               while (i < kvm->arch.hpt.npte &&
                       hdr.n_invalid < 0xffff &&
                       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
                        /* found an invalid entry */
                }
 
                /* Check if we've wrapped around the hash table */
-               if (i >= kvm->arch.hpt_npte) {
+               if (i >= kvm->arch.hpt.npte) {
                        i = 0;
                        ctx->first_pass = 0;
                        break;
 
                err = -EINVAL;
                i = hdr.index;
-               if (i >= kvm->arch.hpt_npte ||
-                   i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
+               if (i >= kvm->arch.hpt.npte ||
+                   i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt.npte)
                        break;
 
-               hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+               hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
                lbuf = (unsigned long __user *)buf;
                for (j = 0; j < hdr.n_valid; ++j) {
                        __be64 hpte_v;
 
        kvm = p->kvm;
        i = p->hpt_index;
-       hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
-       for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) {
+       hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
+       for (; len != 0 && i < kvm->arch.hpt.npte; ++i, hptp += 2) {
                if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
                        continue;
 
                        cpu_relax();
                v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
                hr = be64_to_cpu(hptp[1]);
-               gr = kvm->arch.revmap[i].guest_rpte;
+               gr = kvm->arch.hpt.rev[i].guest_rpte;
                unlock_hpte(hptp, v);
                preempt_enable();
 
 
 
        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
-               head = &kvm->arch.revmap[i];
+               head = &kvm->arch.hpt.rev[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
-               tail = &kvm->arch.revmap[head->back];
+               tail = &kvm->arch.hpt.rev[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
        lock_rmap(rmap);
 
        head = *rmap & KVMPPC_RMAP_INDEX;
-       next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
-       prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
+       next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
+       prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
        if (head == pte_index) {
 
        /* Find and lock the HPTEG slot to use */
  do_insert:
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (pte_index >= kvm->arch.hpt.npte)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                }
                pte_index += i;
        } else {
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
        }
 
        /* Save away the guest's idea of the second HPTE dword */
-       rev = &kvm->arch.revmap[pte_index];
+       rev = &kvm->arch.hpt.rev[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev) {
 
        if (kvm_is_radix(kvm))
                return H_FUNCTION;
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (pte_index >= kvm->arch.hpt.npte)
                return H_PARAMETER;
-       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        pte = orig_pte = be64_to_cpu(hpte[0]);
                return H_NOT_FOUND;
        }
 
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        v = pte & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
                hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
                                break;
                        }
                        if (req != 1 || flags == 3 ||
-                           pte_index >= kvm->arch.hpt_npte) {
+                           pte_index >= kvm->arch.hpt.npte) {
                                /* parameter error */
                                args[j] = ((0xa0 | flags) << 56) + pte_index;
                                ret = H_PARAMETER;
                                break;
                        }
-                       hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
+                       hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
                        }
 
                        args[j] = ((0x80 | flags) << 56) + pte_index;
-                       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+                       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
                        note_hpte_modification(kvm, rev);
 
                        if (!(hp0 & HPTE_V_VALID)) {
 
        if (kvm_is_radix(kvm))
                return H_FUNCTION;
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (pte_index >= kvm->arch.hpt.npte)
                return H_PARAMETER;
 
-       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        v = pte_v = be64_to_cpu(hpte[0]);
        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
 
        if (kvm_is_radix(kvm))
                return H_FUNCTION;
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (pte_index >= kvm->arch.hpt.npte)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
                r = be64_to_cpu(hpte[1]);
                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
 
        if (kvm_is_radix(kvm))
                return H_FUNCTION;
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (pte_index >= kvm->arch.hpt.npte)
                return H_PARAMETER;
 
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
+       hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        v = be64_to_cpu(hpte[0]);
 
        if (kvm_is_radix(kvm))
                return H_FUNCTION;
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (pte_index >= kvm->arch.hpt.npte)
                return H_PARAMETER;
 
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
+       hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        v = be64_to_cpu(hpte[0]);
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
-       hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
+       hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt.mask;
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;
 
        val |= avpn;
 
        for (;;) {
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));
 
                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
-               hash = hash ^ kvm->arch.hpt_mask;
+               hash = hash ^ kvm->arch.hpt.mask;
        }
        return -1;
 }
                                return status;  /* there really was no HPTE */
                        return 0;       /* for prot fault, HPTE disappeared */
                }
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
                v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
                r = be64_to_cpu(hpte[1]);
                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                        v = hpte_new_to_old_v(v, r);
                        r = hpte_new_to_old_r(r);
                }
-               rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
+               rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
                gr = rev->guest_rpte;
 
                unlock_hpte(hpte, orig_v);