bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                        unsigned long *rmap, long pte_index, int realmode);
-extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index);
-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index);
 extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
                        unsigned long *nb_ret);
 
 /* These bits are reserved in the guest view of the HPTE */
 #define HPTE_GR_RESERVED       HPTE_GR_MODIFIED
 
-static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
+static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
 {
        unsigned long tmp, old;
+       __be64 be_lockbit, be_bits;
+
+       /*
+        * We load/store in native endian, but the HTAB is in big endian.
+        * Byte-swapping every value we apply to the HPTE therefore makes
+        * the comparison and the update implicitly correct again.
+        */
+       be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
+       be_bits = cpu_to_be64(bits);
 
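+       /*
+        * Returns true with "old" == 0 when none of "bits" were set and
+        * the stdcx. succeeded, i.e. HVLOCK is now held; on failure "old"
+        * is non-zero and nothing was stored.
+        */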
        asm volatile("  ldarx   %0,0,%2\n"
                     "  and.    %1,%0,%3\n"
                     "  bne     2f\n"
-                    "  ori     %0,%0,%4\n"
+                    "  or      %0,%0,%4\n"
                     "  stdcx.  %0,0,%2\n"
                     "  beq+    2f\n"
                     "  mr      %1,%3\n"
                     "2:        isync"
                     : "=&r" (tmp), "=&r" (old)
-                    : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+                    : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
                     : "cc", "memory");
        return old == 0;
 }
 
        unsigned long slb_v;
        unsigned long pp, key;
        unsigned long v, gr;
-       unsigned long *hptep;
+       __be64 *hptep;
        int index;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
                preempt_enable();
                return -ENOENT;
        }
-       hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
-       v = hptep[0] & ~HPTE_V_HVLOCK;
+       hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+       v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
        gr = kvm->arch.revmap[index].guest_rpte;
 
        /* Unlock the HPTE */
        asm volatile("lwsync" : : : "memory");
-       hptep[0] = v;
+       hptep[0] = cpu_to_be64(v);
        preempt_enable();
 
        gpte->eaddr = eaddr;
                                unsigned long ea, unsigned long dsisr)
 {
        struct kvm *kvm = vcpu->kvm;
-       unsigned long *hptep, hpte[3], r;
+       unsigned long hpte[3], r;
+       __be64 *hptep;
        unsigned long mmu_seq, psize, pte_size;
        unsigned long gpa_base, gfn_base;
        unsigned long gpa, gfn, hva, pfn;
        if (ea != vcpu->arch.pgfault_addr)
                return RESUME_GUEST;
        index = vcpu->arch.pgfault_index;
-       hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+       hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
        rev = &kvm->arch.revmap[index];
        preempt_disable();
        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
-       hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
-       hpte[1] = hptep[1];
+       hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+       hpte[1] = be64_to_cpu(hptep[1]);
        hpte[2] = r = rev->guest_rpte;
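+       /* unlock the HPTE: lwsync is the release barrier for the store */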
        asm volatile("lwsync" : : : "memory");
-       hptep[0] = hpte[0];
+       hptep[0] = cpu_to_be64(hpte[0]);
        preempt_enable();
 
        if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
        preempt_disable();
        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
-       if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
-           rev->guest_rpte != hpte[2])
+       if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
+           be64_to_cpu(hptep[1]) != hpte[1] ||
+           rev->guest_rpte != hpte[2])
                /* HPTE has been changed under us; let the guest retry */
                goto out_unlock;
        hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
        r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
 
-       if (hptep[0] & HPTE_V_VALID) {
+       if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
                /* HPTE was previously valid, so we need to invalidate it */
                unlock_rmap(rmap);
-               hptep[0] |= HPTE_V_ABSENT;
+               hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
                kvmppc_invalidate_hpte(kvm, hptep, index);
                /* don't lose previous R and C bits */
-               r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
+               r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
        } else {
                kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
        }
 
-       hptep[1] = r;
+       hptep[1] = cpu_to_be64(r);
        eieio();
-       hptep[0] = hpte[0];
+       hptep[0] = cpu_to_be64(hpte[0]);
        asm volatile("ptesync" : : : "memory");
        preempt_enable();
        if (page && hpte_is_writable(r))
        return ret;
 
  out_unlock:
-       hptep[0] &= ~HPTE_V_HVLOCK;
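+       /* ~cpu_to_be64(x) == cpu_to_be64(~x), so this clears only HVLOCK */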
+       hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
        preempt_enable();
        goto out_put;
 }
 {
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long h, i, j;
-       unsigned long *hptep;
+       __be64 *hptep;
        unsigned long ptel, psize, rcbits;
 
        for (;;) {
                 * rmap chain lock.
                 */
                i = *rmapp & KVMPPC_RMAP_INDEX;
-               hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+               hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        unlock_rmap(rmapp);
-                       while (hptep[0] & HPTE_V_HVLOCK)
+                       while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
                                cpu_relax();
                        continue;
                }
 
                /* Now check and modify the HPTE */
                ptel = rev[i].guest_rpte;
-               psize = hpte_page_size(hptep[0], ptel);
-               if ((hptep[0] & HPTE_V_VALID) &&
+               psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
+               if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
                    hpte_rpn(ptel, psize) == gfn) {
                        if (kvm->arch.using_mmu_notifiers)
-                               hptep[0] |= HPTE_V_ABSENT;
+                               hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
                        kvmppc_invalidate_hpte(kvm, hptep, i);
                        /* Harvest R and C */
-                       rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
+                       rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
                        *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
                        if (rcbits & ~rev[i].guest_rpte) {
                                rev[i].guest_rpte = ptel | rcbits;
                        }
                }
                unlock_rmap(rmapp);
-               hptep[0] &= ~HPTE_V_HVLOCK;
+               hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
        }
        return 0;
 }
 {
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;
-       unsigned long *hptep;
+       __be64 *hptep;
        int ret = 0;
 
  retry:
 
        i = head = *rmapp & KVMPPC_RMAP_INDEX;
        do {
-               hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+               hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
                j = rev[i].forw;
 
                /* If this HPTE isn't referenced, ignore it */
-               if (!(hptep[1] & HPTE_R_R))
+               if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
                        continue;
 
                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        unlock_rmap(rmapp);
-                       while (hptep[0] & HPTE_V_HVLOCK)
+                       while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
                                cpu_relax();
                        goto retry;
                }
 
                /* Now check and modify the HPTE */
-               if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
+               if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
+                   (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
                        kvmppc_clear_ref_hpte(kvm, hptep, i);
                        if (!(rev[i].guest_rpte & HPTE_R_R)) {
                                rev[i].guest_rpte |= HPTE_R_R;
                        }
                        ret = 1;
                }
-               hptep[0] &= ~HPTE_V_HVLOCK;
+               hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
        } while ((i = j) != head);
 
        unlock_rmap(rmapp);
                do {
                        hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
                        j = rev[i].forw;
-                       if (hp[1] & HPTE_R_R)
+                       if (be64_to_cpu(hp[1]) & HPTE_R_R)
                                goto out;
                } while ((i = j) != head);
        }
        unsigned long head, i, j;
        unsigned long n;
        unsigned long v, r;
-       unsigned long *hptep;
+       __be64 *hptep;
        int npages_dirty = 0;
 
  retry:
 
        i = head = *rmapp & KVMPPC_RMAP_INDEX;
        do {
-               hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+               unsigned long hptep1;
+
+               hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
                j = rev[i].forw;
 
                /*
                 * Otherwise we need to do the tlbie even if C==0 in
                 * order to pick up any delayed writeback of C.
                 */
-               if (!(hptep[1] & HPTE_R_C) &&
-                   (!hpte_is_writable(hptep[1]) || vcpus_running(kvm)))
+               hptep1 = be64_to_cpu(hptep[1]);
+               if (!(hptep1 & HPTE_R_C) &&
+                   (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
                        continue;
 
                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        unlock_rmap(rmapp);
-                       while (hptep[0] & HPTE_V_HVLOCK)
+                       while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
                                cpu_relax();
                        goto retry;
                }
 
                /* Now check and modify the HPTE */
-               if (!(hptep[0] & HPTE_V_VALID))
+               if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID)))
                        continue;
 
                /* need to make it temporarily absent so C is stable */
-               hptep[0] |= HPTE_V_ABSENT;
+               hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
                kvmppc_invalidate_hpte(kvm, hptep, i);
-               v = hptep[0];
-               r = hptep[1];
+               v = be64_to_cpu(hptep[0]);
+               r = be64_to_cpu(hptep[1]);
                if (r & HPTE_R_C) {
-                       hptep[1] = r & ~HPTE_R_C;
+                       hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
                        if (!(rev[i].guest_rpte & HPTE_R_C)) {
                                rev[i].guest_rpte |= HPTE_R_C;
                                note_hpte_modification(kvm, &rev[i]);
                }
                v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
                v |= HPTE_V_VALID;
-               hptep[0] = v;
+               hptep[0] = cpu_to_be64(v);
        } while ((i = j) != head);
 
        unlock_rmap(rmapp);
  * Returns 1 if this HPT entry has been modified or has pending
  * R/C bit changes.
  */
-static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
+static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
 {
        unsigned long rcbits_unset;
 
 
        /* Also need to consider changes in reference and changed bits */
        rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
-       if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
+       if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
+           (be64_to_cpu(hptp[1]) & rcbits_unset))
                return 1;
 
        return 0;
 }
 
-static long record_hpte(unsigned long flags, unsigned long *hptp,
+static long record_hpte(unsigned long flags, __be64 *hptp,
                        unsigned long *hpte, struct revmap_entry *revp,
                        int want_valid, int first_pass)
 {
                return 0;
 
        valid = 0;
-       if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+       if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                valid = 1;
                if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
-                   !(hptp[0] & HPTE_V_BOLTED))
+                   !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
                        valid = 0;
        }
        if (valid != want_valid)
                preempt_disable();
                while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
                        cpu_relax();
-               v = hptp[0];
+               v = be64_to_cpu(hptp[0]);
 
                /* re-evaluate valid and dirty from synchronized HPTE value */
                valid = !!(v & HPTE_V_VALID);
 
                /* Harvest R and C into guest view if necessary */
                rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
-               if (valid && (rcbits_unset & hptp[1])) {
-                       revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
-                               HPTE_GR_MODIFIED;
+               if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
+                       revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
+                               (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
                        dirty = 1;
                }
 
                        revp->guest_rpte = r;
                }
                asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-               hptp[0] &= ~HPTE_V_HVLOCK;
+               hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                preempt_enable();
                if (!(valid == want_valid && (first_pass || dirty)))
                        ok = 0;
        }
-       hpte[0] = v;
-       hpte[1] = r;
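+       /* the KVM_GET_HTAB stream stays big endian, like the HPT itself */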
+       hpte[0] = cpu_to_be64(v);
+       hpte[1] = cpu_to_be64(r);
        return ok;
 }
 
        struct kvm_htab_ctx *ctx = file->private_data;
        struct kvm *kvm = ctx->kvm;
        struct kvm_get_htab_header hdr;
-       unsigned long *hptp;
+       __be64 *hptp;
        struct revmap_entry *revp;
        unsigned long i, nb, nw;
        unsigned long __user *lbuf;
        flags = ctx->flags;
 
        i = ctx->index;
-       hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+       hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
        revp = kvm->arch.revmap + i;
        lbuf = (unsigned long __user *)buf;
 
        unsigned long i, j;
        unsigned long v, r;
        unsigned long __user *lbuf;
-       unsigned long *hptp;
+       __be64 *hptp;
        unsigned long tmp[2];
        ssize_t nb;
        long int err, ret;
                    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
                        break;
 
-               hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+               hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
                lbuf = (unsigned long __user *)buf;
                for (j = 0; j < hdr.n_valid; ++j) {
                        err = -EFAULT;
                        lbuf += 2;
                        nb += HPTE_SIZE;
 
-                       if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+                       if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
                                kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
                        err = -EIO;
                        ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
                }
 
                for (j = 0; j < hdr.n_invalid; ++j) {
-                       if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+                       if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
                                kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
                        ++i;
                        hptp += 2;
 
        return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
 }
 
-static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
+static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
 {
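+       /* release barrier: order prior HPTE updates before the unlock */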
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-       hpte[0] = hpte_v;
+       hpte[0] = cpu_to_be64(hpte_v);
 }
 
 long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 {
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
-       unsigned long *hpte;
+       __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel;
        struct kvm_memory_slot *memslot;
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
-               hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
-                       if ((*hpte & HPTE_V_VALID) == 0 &&
+                       if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
+                               u64 pte;
+
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
-                               if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
+                               pte = be64_to_cpu(*hpte);
+                               if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
-                               *hpte &= ~HPTE_V_HVLOCK;
+                               *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                hpte += 2;
                        }
                        if (i == 8)
                }
                pte_index += i;
        } else {
-               hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
+                       u64 pte;
+
                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
-                       if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
-                               *hpte &= ~HPTE_V_HVLOCK;
+                       pte = be64_to_cpu(*hpte);
+                       if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+                               *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                return H_PTEG_FULL;
                        }
                }
                }
        }
 
-       hpte[1] = ptel;
+       hpte[1] = cpu_to_be64(ptel);
 
        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
-       hpte[0] = pteh;
+       hpte[0] = cpu_to_be64(pteh);
        asm volatile("ptesync" : : : "memory");
 
        *pte_idx_ret = pte_index;
                        unsigned long pte_index, unsigned long avpn,
                        unsigned long *hpret)
 {
-       unsigned long *hpte;
+       __be64 *hpte;
        unsigned long v, r, rb;
        struct revmap_entry *rev;
+       u64 pte;
 
        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
-       hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
-       if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
-           ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
-           ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
-               hpte[0] &= ~HPTE_V_HVLOCK;
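+       /* work on a native endian copy of the first doubleword from here on */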
+       pte = be64_to_cpu(hpte[0]);
+       if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+           ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
+           ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
+               hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                return H_NOT_FOUND;
        }
 
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-       v = hpte[0] & ~HPTE_V_HVLOCK;
+       v = pte & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
-               hpte[0] &= ~HPTE_V_VALID;
-               rb = compute_tlbie_rb(v, hpte[1], pte_index);
+               hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
+               rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /* Read PTE low word after tlbie to get final R/C values */
-               remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
+               remove_revmap_chain(kvm, pte_index, rev, v,
+                                   be64_to_cpu(hpte[1]));
        }
        r = rev->guest_rpte & ~HPTE_GR_RESERVED;
        note_hpte_modification(kvm, rev);
 {
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
-       unsigned long *hp, *hptes[4], tlbrb[4];
+       __be64 *hp, *hptes[4];
+       unsigned long tlbrb[4];
        long int i, j, k, n, found, indexes[4];
        unsigned long flags, req, pte_index, rcbits;
        int global;
        long int ret = H_SUCCESS;
        struct revmap_entry *rev, *revs[4];
+       u64 hp0;
 
        global = global_invalidates(kvm, 0);
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
                                ret = H_PARAMETER;
                                break;
                        }
-                       hp = (unsigned long *)
-                               (kvm->arch.hpt_virt + (pte_index << 4));
+                       hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
                                        cpu_relax();
                        }
                        found = 0;
-                       if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
+                       hp0 = be64_to_cpu(hp[0]);
+                       if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                                switch (flags & 3) {
                                case 0:         /* absolute */
                                        found = 1;
                                        break;
                                case 1:         /* andcond */
-                                       if (!(hp[0] & args[j + 1]))
+                                       if (!(hp0 & args[j + 1]))
                                                found = 1;
                                        break;
                                case 2:         /* AVPN */
-                                       if ((hp[0] & ~0x7fUL) == args[j + 1])
+                                       if ((hp0 & ~0x7fUL) == args[j + 1])
                                                found = 1;
                                        break;
                                }
                        }
                        if (!found) {
-                               hp[0] &= ~HPTE_V_HVLOCK;
+                               hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                args[j] = ((0x90 | flags) << 56) + pte_index;
                                continue;
                        }
                        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
                        note_hpte_modification(kvm, rev);
 
-                       if (!(hp[0] & HPTE_V_VALID)) {
+                       if (!(hp0 & HPTE_V_VALID)) {
                                /* insert R and C bits from PTE */
                                rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                                args[j] |= rcbits << (56 - 5);
                                continue;
                        }
 
-                       hp[0] &= ~HPTE_V_VALID;         /* leave it locked */
-                       tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+                       /* leave it locked */
+                       hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
+                       tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
+                               be64_to_cpu(hp[1]), pte_index);
                        indexes[n] = j;
                        hptes[n] = hp;
                        revs[n] = rev;
                        pte_index = args[j] & ((1ul << 56) - 1);
                        hp = hptes[k];
                        rev = revs[k];
-                       remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
+                       remove_revmap_chain(kvm, pte_index, rev,
+                               be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
                        rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                        args[j] |= rcbits << (56 - 5);
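+                       /* a zero store is endian-neutral, no swap needed */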
                        hp[0] = 0;
                      unsigned long va)
 {
        struct kvm *kvm = vcpu->kvm;
-       unsigned long *hpte;
+       __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;
+       u64 pte;
 
        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
 
-       hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
-       if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
-           ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
-               hpte[0] &= ~HPTE_V_HVLOCK;
+       pte = be64_to_cpu(hpte[0]);
+       if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+           ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
+               hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                return H_NOT_FOUND;
        }
 
-       v = hpte[0];
+       v = pte;
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
                rev->guest_rpte = r;
                note_hpte_modification(kvm, rev);
        }
-       r = (hpte[1] & ~mask) | bits;
+       r = (be64_to_cpu(hpte[1]) & ~mask) | bits;
 
        /* Update HPTE */
        if (v & HPTE_V_VALID) {
                rb = compute_tlbie_rb(v, r, pte_index);
-               hpte[0] = v & ~HPTE_V_VALID;
+               hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /*
                 * If the host has this page as readonly but the guest
                        }
                }
        }
-       hpte[1] = r;
+       hpte[1] = cpu_to_be64(r);
        eieio();
-       hpte[0] = v & ~HPTE_V_HVLOCK;
+       hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
 }
                   unsigned long pte_index)
 {
        struct kvm *kvm = vcpu->kvm;
-       unsigned long *hpte, v, r;
+       __be64 *hpte;
+       unsigned long v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;
 
        }
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
-               hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
-               v = hpte[0] & ~HPTE_V_HVLOCK;
-               r = hpte[1];
+               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+               v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
+               r = be64_to_cpu(hpte[1]);
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
        return H_SUCCESS;
 }
 
-void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index)
 {
        unsigned long rb;
 
-       hptep[0] &= ~HPTE_V_VALID;
-       rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
+       hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
+       rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
+                             pte_index);
        do_tlbies(kvm, &rb, 1, 1, true);
 }
 EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
 
-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
                           unsigned long pte_index)
 {
        unsigned long rb;
        unsigned char rbyte;
 
-       rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
-       rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
+       rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
+                             pte_index);
+       rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
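+       /*
+        * A plain byte store needs no swap: byte offsets within the
+        * big-endian HPTE image are the same on either host endianness.
+        */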
        /* modify only the second-last byte, which contains the ref bit */
        *((char *)hptep + 14) = rbyte;
        do_tlbies(kvm, &rb, 1, 1, false);
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
-       unsigned long *hpte;
+       __be64 *hpte;
        unsigned long mask, val;
        unsigned long v, r;
 
        val |= avpn;
 
        for (;;) {
-               hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));
+               hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
 
                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
-                       v = hpte[i] & ~HPTE_V_HVLOCK;
+                       v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
 
                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
-                       v = hpte[i] & ~HPTE_V_HVLOCK;
-                       r = hpte[i+1];
+                       v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+                       r = be64_to_cpu(hpte[i+1]);
 
                        /*
                         * Check the HPTE again, including large page size
                                return (hash << 3) + (i >> 1);
 
                        /* Unlock and move on */
-                       hpte[i] = v;
+                       hpte[i] = cpu_to_be64(v);
                }
 
                if (val & HPTE_V_SECONDARY)
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr;
-       unsigned long *hpte;
+       __be64 *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;
                        return status;  /* there really was no HPTE */
                return 0;               /* for prot fault, HPTE disappeared */
        }
-       hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
-       v = hpte[0] & ~HPTE_V_HVLOCK;
-       r = hpte[1];
+       hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+       v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
+       r = be64_to_cpu(hpte[1]);
        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
        gr = rev->guest_rpte;