KVMTEST(vec);                                                   \
        _SOFTEN_TEST(EXC_HV)
 
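+/*
+ * PPC970 (CPU_FTR_ARCH_201) has no HSRR0/HSRR1, so even with the host in
+ * hypervisor mode an external interrupt arrives through the standard
+ * SRRs; hence EXC_STD rather than EXC_HV below.
+ */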
+#define SOFTEN_TEST_HV_201(vec)                                                \
+       KVMTEST(vec);                                                   \
+       _SOFTEN_TEST(EXC_STD)
+
 #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)             \
        HMT_MEDIUM;                                                     \
        SET_SCRATCH0(r13);    /* save r13 */                            \
 
        unsigned long xics_phys;
        u64 dabr;
        u64 host_mmcr[3];
-       u32 host_pmc[6];
+       u32 host_pmc[8];
        u64 host_purr;
        u64 host_spurr;
        u64 host_dscr;
 
        u32 dbsr;
 
        u64 mmcr[3];
-       u32 pmc[6];
+       u32 pmc[8];
 
 #ifdef CONFIG_KVM_EXIT_TIMING
        struct mutex exit_timing_lock;
 
        DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
        /* paca */
        DEFINE(PACA_SIZE, sizeof(struct paca_struct));
+       DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
        DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
        DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
        DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
 
                KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
        FTR_SECTION_ELSE
                _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
-                                           EXC_STD, SOFTEN_TEST_PR)
+                                           EXC_STD, SOFTEN_TEST_HV_201)
                KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
        ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 
 
          If unsure, say N.
 
 config KVM_BOOK3S_64_HV
-       bool "KVM support for POWER7 using hypervisor mode in host"
+       bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
        depends on KVM_BOOK3S_64
        ---help---
          Support running unmodified book3s_64 guest kernels in
-         virtual machines on POWER7 processors that have hypervisor
-         mode available to the host.
+         virtual machines on POWER7 and PPC970 processors that have
+         hypervisor mode available to the host.
 
          If you say Y here, KVM will use the hardware virtualization
          facilities of POWER7 (and later) processors, meaning that
          guest operating systems will run at full hardware speed
          using supervisor and user modes.  However, this also means
          that KVM is not usable under PowerVM (pHyp), is only usable
-         on POWER7 (or later) processors, and can only emulate
-         POWER5+, POWER6 and POWER7 processors.
-
-         This module provides access to the hardware capabilities through
-         a character device node named /dev/kvm.
+         on POWER7 (or later) processors and PPC970-family processors,
+         and cannot emulate a different processor from the host processor.
 
          If unsure, say N.
 
 
 #define VRMA_PAGE_ORDER        24
 #define VRMA_VSID      0x1ffffffUL     /* 1TB VSID reserved for VRMA */
 
+/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
+#define MAX_LPID_970   63
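+/*
+ * lpid_inuse below is sized for the larger POWER7 LPID space,
+ * which also covers the smaller PPC970 range.
+ */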
 #define NR_LPIDS       (LPID_RSVD + 1)
 unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
 
 
        kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
        kvm->arch.lpid = lpid;
-       kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
-       kvm->arch.host_lpid = mfspr(SPRN_LPID);
-       kvm->arch.host_lpcr = mfspr(SPRN_LPCR);
 
        pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
        return 0;
 
 int kvmppc_mmu_hv_init(void)
 {
-       if (!cpu_has_feature(CPU_FTR_HVMODE) ||
-           !cpu_has_feature(CPU_FTR_ARCH_206))
+       unsigned long host_lpid, rsvd_lpid;
+
+       if (!cpu_has_feature(CPU_FTR_HVMODE))
                return -EINVAL;
+
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
-       set_bit(mfspr(SPRN_LPID), lpid_inuse);
-       set_bit(LPID_RSVD, lpid_inuse);
+
+       if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+               host_lpid = mfspr(SPRN_LPID);   /* POWER7 */
+               rsvd_lpid = LPID_RSVD;
+       } else {
+               host_lpid = 0;                  /* PPC970 */
+               rsvd_lpid = MAX_LPID_970;
+       }
+
+       set_bit(host_lpid, lpid_inuse);
+       /* rsvd_lpid is reserved for use in partition switching */
+       set_bit(rsvd_lpid, lpid_inuse);
 
        return 0;
 }
 {
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
 
-       vcpu->arch.slb_nr = 32;         /* Assume POWER7 for now */
+       if (cpu_has_feature(CPU_FTR_ARCH_206))
+               vcpu->arch.slb_nr = 32;         /* POWER7 */
+       else
+               vcpu->arch.slb_nr = 64;         /* PPC970 */
 
        mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
        mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
 
 
 int kvmppc_core_check_processor_compat(void)
 {
-       if (cpu_has_feature(CPU_FTR_HVMODE) &&
-           cpu_has_feature(CPU_FTR_ARCH_206))
+       if (cpu_has_feature(CPU_FTR_HVMODE))
                return 0;
        return -EIO;
 }
                return -EINTR;
        }
 
+       /* On PPC970, check that we have an RMA region */
+       if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
+               return -EPERM;
+
        kvm_run->exit_reason = 0;
        vcpu->arch.ret = RESUME_GUEST;
        vcpu->arch.trap = 0;
 }
 
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
-   Assumes POWER7. */
+   Assumes POWER7 or PPC970. */
 static inline int lpcr_rmls(unsigned long rma_size)
 {
        switch (rma_size) {
        case 32ul << 20:        /* 32 MB */
-               return 8;
+               if (cpu_has_feature(CPU_FTR_ARCH_206))
+                       return 8;       /* only supported on POWER7 */
+               return -1;
        case 64ul << 20:        /* 64 MB */
                return 3;
        case 128ul << 20:       /* 128 MB */
                    mem->userspace_addr == vma->vm_start)
                        ri = vma->vm_file->private_data;
                up_read(&current->mm->mmap_sem);
+               if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) {
+                       pr_err("CPU requires an RMO\n");
+                       return -EINVAL;
+               }
        }
 
        if (ri) {
                atomic_inc(&ri->use_count);
                kvm->arch.rma = ri;
                kvm->arch.n_rma_pages = rma_size >> porder;
-               lpcr = kvm->arch.lpcr & ~(LPCR_VPM0 | LPCR_VRMA_L);
-               lpcr |= rmls << LPCR_RMLS_SH;
+
+               /* Update LPCR and RMOR */
+               lpcr = kvm->arch.lpcr;
+               if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+                       /* PPC970; insert RMLS value (split field) in HID4 */
+                       lpcr &= ~((1ul << HID4_RMLS0_SH) |
+                                 (3ul << HID4_RMLS2_SH));
+                       lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
+                               ((rmls & 3) << HID4_RMLS2_SH);
+                       /* RMOR is also in HID4 */
+                       lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
+                               << HID4_RMOR_SH;
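+                       /* i.e. the RMA base address in 64 MB (2^26 byte) units */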
+               } else {
+                       /* POWER7 */
+                       lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
+                       lpcr |= rmls << LPCR_RMLS_SH;
+                       kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
+               }
                kvm->arch.lpcr = lpcr;
-               kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
                pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
                        ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
        }
        kvm->arch.rma = NULL;
        kvm->arch.n_rma_pages = 0;
 
-       lpcr = kvm->arch.host_lpcr & (LPCR_PECE | LPCR_LPES);
-       lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
-               LPCR_VPM0 | LPCR_VRMA_L;
-       kvm->arch.lpcr = lpcr;
+       kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
 
+       if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+               /* PPC970; HID4 is effectively the LPCR */
+               unsigned long lpid = kvm->arch.lpid;
+               kvm->arch.host_lpid = 0;
+               kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
+               lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
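+               /* the 6-bit LPID splits: top 2 bits at LPID1, low 4 at LPID5 */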
+               lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
+                       ((lpid & 0xf) << HID4_LPID5_SH);
+       } else {
+               /* POWER7; init LPCR for virtual RMA mode */
+               kvm->arch.host_lpid = mfspr(SPRN_LPID);
+               kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+               lpcr &= LPCR_PECE | LPCR_LPES;
+               lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+                       LPCR_VPM0 | LPCR_VRMA_L;
+       }
+       kvm->arch.lpcr = lpcr;
 
        return 0;
 
 
 static DEFINE_SPINLOCK(rma_lock);
 
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
-   Assumes POWER7. */
+   Assumes POWER7 or PPC970. */
 static inline int lpcr_rmls(unsigned long rma_size)
 {
        switch (rma_size) {
        case 32ul << 20:        /* 32 MB */
-               return 8;
+               if (cpu_has_feature(CPU_FTR_ARCH_206))
+                       return 8;       /* only supported on POWER7 */
+               return -1;
        case 64ul << 20:        /* 64 MB */
                return 3;
        case 128ul << 20:       /* 128 MB */
        void *rma;
        struct page *pg;
 
-       /* Only do this in HV mode */
-       if (!cpu_has_feature(CPU_FTR_HVMODE))
+       /* Only do this on PPC970 in HV mode */
+       if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+           !cpu_has_feature(CPU_FTR_ARCH_201))
                return;
 
        if (!kvm_rma_size || !kvm_rma_count)
 
        SAVE_NVGPRS(r1)
 
        /* Save host DSCR */
+BEGIN_FTR_SECTION
        mfspr   r3, SPRN_DSCR
        std     r3, HSTATE_DSCR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
        /* Save host DABR */
        mfspr   r3, SPRN_DABR
        mfspr   r7, SPRN_PMC4
        mfspr   r8, SPRN_PMC5
        mfspr   r9, SPRN_PMC6
+BEGIN_FTR_SECTION
+       mfspr   r10, SPRN_PMC7
+       mfspr   r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        stw     r3, HSTATE_PMC(r13)
        stw     r5, HSTATE_PMC + 4(r13)
        stw     r6, HSTATE_PMC + 8(r13)
        stw     r7, HSTATE_PMC + 12(r13)
        stw     r8, HSTATE_PMC + 16(r13)
        stw     r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+       stw     r10, HSTATE_PMC + 24(r13)
+       stw     r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 31:
 
        /*
        add     r8,r8,r7
        std     r8,HSTATE_DECEXP(r13)
 
+       /*
+        * On PPC970, if the guest vcpu has an external interrupt pending,
+        * send ourselves an IPI so as to interrupt the guest once it
+        * enables interrupts.  (It must have interrupts disabled,
+        * otherwise we would already have delivered the interrupt.)
+        */
+BEGIN_FTR_SECTION
+       ld      r0, VCPU_PENDING_EXC(r4)
+       li      r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
+       oris    r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
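+       /* the LEVEL priority bit lies in the upper half-word, hence oris/@h */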
+       and.    r0, r0, r7
+       beq     32f
+       mr      r31, r4
+       lhz     r3, PACAPACAINDEX(r13)
+       bl      smp_send_reschedule
+       nop
+       mr      r4, r31
+32:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+
        /* Jump to partition switch code */
        bl      .kvmppc_hv_entry_trampoline
        nop
 
        /* only handle 4k, 64k and 16M pages for now */
        porder = 12;
        if (pteh & HPTE_V_LARGE) {
-               if ((ptel & 0xf000) == 0x1000) {
+               if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+                   (ptel & 0xf000) == 0x1000) {
                        /* 64k page */
                        porder = 16;
                } else if ((ptel & 0xff000) == 0) {
        va_low &= 0x7ff;
        if (v & HPTE_V_LARGE) {
                rb |= 1;                        /* L field */
-               if (r & 0xff000) {
+               if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+                   (r & 0xff000)) {
                        /* non-16MB large page, must be 64k */
                        /* (masks depend on page size) */
                        rb |= 0x1000;           /* page encoding in LP field */
 
        lwz     r7, VCPU_PMC + 12(r4)
        lwz     r8, VCPU_PMC + 16(r4)
        lwz     r9, VCPU_PMC + 20(r4)
+BEGIN_FTR_SECTION
+       lwz     r10, VCPU_PMC + 24(r4)
+       lwz     r11, VCPU_PMC + 28(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        mtspr   SPRN_PMC1, r3
        mtspr   SPRN_PMC2, r5
        mtspr   SPRN_PMC3, r6
        mtspr   SPRN_PMC4, r7
        mtspr   SPRN_PMC5, r8
        mtspr   SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+       mtspr   SPRN_PMC7, r10
+       mtspr   SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        ld      r3, VCPU_MMCR(r4)
        ld      r5, VCPU_MMCR + 8(r4)
        ld      r6, VCPU_MMCR + 16(r4)
        /* Load up FP, VMX and VSX registers */
        bl      kvmppc_load_fp
 
+BEGIN_FTR_SECTION
        /* Switch DSCR to guest value */
        ld      r5, VCPU_DSCR(r4)
        mtspr   SPRN_DSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
        /*
         * Set the decrementer to the guest decrementer.
        mtspr   SPRN_DABRX,r5
        mtspr   SPRN_DABR,r6
 
+BEGIN_FTR_SECTION
        /* Restore AMR and UAMOR, set AMOR to all 1s */
        ld      r5,VCPU_AMR(r4)
        ld      r6,VCPU_UAMOR(r4)
        mtspr   SPRN_AMR,r5
        mtspr   SPRN_UAMOR,r6
        mtspr   SPRN_AMOR,r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
        /* Clear out SLB */
        li      r6,0
        slbia
        ptesync
 
+BEGIN_FTR_SECTION
+       b       30f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+       /*
+        * POWER7 host -> guest partition switch code.
+        * We don't have to lock against concurrent tlbies,
+        * but we do have to coordinate across hardware threads.
+        */
        /* Increment entry count iff exit count is zero. */
        ld      r5,HSTATE_KVM_VCORE(r13)
        addi    r9,r5,VCORE_ENTRY_EXIT
        ld      r8,VCPU_SPURR(r4)
        mtspr   SPRN_PURR,r7
        mtspr   SPRN_SPURR,r8
+       b       31f
+
+       /*
+        * PPC970 host -> guest partition switch code.
+        * We have to lock against concurrent tlbies,
+        * using native_tlbie_lock to lock against host tlbies
+        * and kvm->arch.tlbie_lock to lock against guest tlbies.
+        * We also have to invalidate the TLB since its
+        * entries aren't tagged with the LPID.
+        */
+30:    ld      r9,VCPU_KVM(r4)         /* pointer to struct kvm */
+
+       /* first take native_tlbie_lock */
+       .section ".toc","aw"
+toc_tlbie_lock:
+       .tc     native_tlbie_lock[TC],native_tlbie_lock
+       .previous
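+       /* r3 = &native_tlbie_lock, loaded via the TOC entry created above */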
+       ld      r3,toc_tlbie_lock@toc(2)
+       lwz     r8,PACA_LOCK_TOKEN(r13)
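+       /* spin until the lock word reads 0, then claim it with our token */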
+24:    lwarx   r0,0,r3
+       cmpwi   r0,0
+       bne     24b
+       stwcx.  r8,0,r3
+       bne     24b
+       isync
+
+       ld      r7,KVM_LPCR(r9)         /* use kvm->arch.lpcr to store HID4 */
+       li      r0,0x18f
+       rotldi  r0,r0,HID4_LPID5_SH     /* all lpid bits in HID4 = 1 */
+       or      r0,r7,r0
+       ptesync
+       sync
+       mtspr   SPRN_HID4,r0            /* switch to reserved LPID */
+       isync
+       li      r0,0
+       stw     r0,0(r3)                /* drop native_tlbie_lock */
+
+       /* invalidate the whole TLB */
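+       /* (256 tlbiel ops, stepping the EA set-index bits by 4k each time) */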
+       li      r0,256
+       mtctr   r0
+       li      r6,0
+25:    tlbiel  r6
+       addi    r6,r6,0x1000
+       bdnz    25b
+       ptesync
+
+       /* Take the guest's tlbie_lock */
+       addi    r3,r9,KVM_TLBIE_LOCK
+24:    lwarx   r0,0,r3
+       cmpwi   r0,0
+       bne     24b
+       stwcx.  r8,0,r3
+       bne     24b
+       isync
+       ld      r6,KVM_SDR1(r9)
+       mtspr   SPRN_SDR1,r6            /* switch to partition page table */
+
+       /* Set up HID4 with the guest's LPID etc. */
+       sync
+       mtspr   SPRN_HID4,r7
+       isync
+
+       /* drop the guest's tlbie_lock */
+       li      r0,0
+       stw     r0,0(r3)
+
+       /* Check if HDEC expires soon */
+       mfspr   r3,SPRN_HDEC
+       cmpwi   r3,10
+       li      r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+       mr      r9,r4
+       blt     hdec_soon
+
+       /* Enable HDEC interrupts */
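+       /* HID0 updates on PPC970 need a sync before and readbacks after */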
+       mfspr   r0,SPRN_HID0
+       li      r3,1
+       rldimi  r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+       sync
+       mtspr   SPRN_HID0,r0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
 
        /* Load up guest SLB entries */
-       lwz     r5,VCPU_SLB_MAX(r4)
+31:    lwz     r5,VCPU_SLB_MAX(r4)
        cmpwi   r5,0
        beq     9f
        mtctr   r5
 hcall_real_cont:
 
        /* Check for mediated interrupts (could be done earlier really ...) */
+BEGIN_FTR_SECTION
        cmpwi   r12,BOOK3S_INTERRUPT_EXTERNAL
        bne+    1f
        ld      r5,VCPU_KVM(r9)
        andi.   r0,r5,LPCR_MER
        bne     bounce_ext_interrupt
 1:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
        /* Save DEC */
        mfspr   r5,SPRN_DEC
        /* Save HEIR (HV emulation assist reg) in last_inst
           if this is an HEI (HV emulation interrupt, e40) */
        li      r3,-1
+BEGIN_FTR_SECTION
        cmpwi   r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
        bne     11f
        mfspr   r3,SPRN_HEIR
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 11:    stw     r3,VCPU_LAST_INST(r9)
 
        /* Save more register state  */
        stw     r7, VCPU_DSISR(r9)
        std     r8, VCPU_CTR(r9)
        /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
+BEGIN_FTR_SECTION
        cmpwi   r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
        beq     6f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 7:     std     r6, VCPU_FAULT_DAR(r9)
        stw     r7, VCPU_FAULT_DSISR(r9)
 
        /*
         * Save the guest PURR/SPURR
         */
+BEGIN_FTR_SECTION
        mfspr   r5,SPRN_PURR
        mfspr   r6,SPRN_SPURR
        ld      r7,VCPU_PURR(r9)
        add     r4,r4,r6
        mtspr   SPRN_PURR,r3
        mtspr   SPRN_SPURR,r4
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 
        /* Clear out SLB */
        li      r5,0
        ptesync
 
 hdec_soon:
+BEGIN_FTR_SECTION
+       b       32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+       /*
+        * POWER7 guest -> host partition switch code.
+        * We don't have to lock against tlbies but we do
+        * have to coordinate the hardware threads.
+        */
        /* Increment the threads-exiting-guest count in the 0xff00
           bits of vcore->entry_exit_count */
        lwsync
 16:    ld      r8,KVM_HOST_LPCR(r4)
        mtspr   SPRN_LPCR,r8
        isync
+       b       33f
+
+       /*
+        * PPC970 guest -> host partition switch code.
+        * We have to lock against concurrent tlbies, and
+        * we have to flush the whole TLB.
+        */
+32:    ld      r4,VCPU_KVM(r9)         /* pointer to struct kvm */
+
+       /* Take the guest's tlbie_lock */
+       lwz     r8,PACA_LOCK_TOKEN(r13)
+       addi    r3,r4,KVM_TLBIE_LOCK
+24:    lwarx   r0,0,r3
+       cmpwi   r0,0
+       bne     24b
+       stwcx.  r8,0,r3
+       bne     24b
+       isync
+
+       ld      r7,KVM_HOST_LPCR(r4)    /* use kvm->arch.host_lpcr for HID4 */
+       li      r0,0x18f
+       rotldi  r0,r0,HID4_LPID5_SH     /* all lpid bits in HID4 = 1 */
+       or      r0,r7,r0
+       ptesync
+       sync
+       mtspr   SPRN_HID4,r0            /* switch to reserved LPID */
+       isync
+       li      r0,0
+       stw     r0,0(r3)                /* drop guest tlbie_lock */
+
+       /* invalidate the whole TLB */
+       li      r0,256
+       mtctr   r0
+       li      r6,0
+25:    tlbiel  r6
+       addi    r6,r6,0x1000
+       bdnz    25b
+       ptesync
+
+       /* take native_tlbie_lock */
+       ld      r3,toc_tlbie_lock@toc(2)
+24:    lwarx   r0,0,r3
+       cmpwi   r0,0
+       bne     24b
+       stwcx.  r8,0,r3
+       bne     24b
+       isync
+
+       ld      r6,KVM_HOST_SDR1(r4)
+       mtspr   SPRN_SDR1,r6            /* switch to host page table */
+
+       /* Set up host HID4 value */
+       sync
+       mtspr   SPRN_HID4,r7
+       isync
+       li      r0,0
+       stw     r0,0(r3)                /* drop native_tlbie_lock */
+
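+       /* push HDEC far out so it cannot fire before we disable it below */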
+       lis     r8,0x7fff               /* MAX_INT@h */
+       mtspr   SPRN_HDEC,r8
+
+       /* Disable HDEC interrupts */
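+       /* same PPC970 HID0 update sequence as on the guest entry path */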
+       mfspr   r0,SPRN_HID0
+       li      r3,0
+       rldimi  r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+       sync
+       mtspr   SPRN_HID0,r0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
+       mfspr   r0,SPRN_HID0
 
        /* load host SLB entries */
-       ld      r8,PACA_SLBSHADOWPTR(r13)
+33:    ld      r8,PACA_SLBSHADOWPTR(r13)
 
        .rept   SLB_NUM_BOLTED
        ld      r5,SLBSHADOW_SAVEAREA(r8)
        .endr
 
        /* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
        mfspr   r5,SPRN_AMR
        mfspr   r6,SPRN_UAMOR
        std     r5,VCPU_AMR(r9)
        std     r6,VCPU_UAMOR(r9)
        li      r6,0
        mtspr   SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
        /* Restore host DABR and DABRX */
        ld      r5,HSTATE_DABR(r13)
        mtspr   SPRN_DABRX,r6
 
        /* Switch DSCR back to host value */
+BEGIN_FTR_SECTION
        mfspr   r8, SPRN_DSCR
        ld      r7, HSTATE_DSCR(r13)
        std     r8, VCPU_DSCR(r7)
        mtspr   SPRN_DSCR, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
        /* Save non-volatile GPRs */
        std     r14, VCPU_GPR(r14)(r9)
        mfspr   r6, SPRN_PMC4
        mfspr   r7, SPRN_PMC5
        mfspr   r8, SPRN_PMC6
+BEGIN_FTR_SECTION
+       mfspr   r10, SPRN_PMC7
+       mfspr   r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        stw     r3, VCPU_PMC(r9)
        stw     r4, VCPU_PMC + 4(r9)
        stw     r5, VCPU_PMC + 8(r9)
        stw     r6, VCPU_PMC + 12(r9)
        stw     r7, VCPU_PMC + 16(r9)
        stw     r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+       stw     r10, VCPU_PMC + 24(r9)
+       stw     r11, VCPU_PMC + 28(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 22:
        /* save FP state */
        mr      r3, r9
        bl      .kvmppc_save_fp
 
-       /* Secondary threads go off to take a nap */
+       /* Secondary threads go off to take a nap on POWER7 */
+BEGIN_FTR_SECTION
        lwz     r0,VCPU_PTID(r3)
        cmpwi   r0,0
        bne     secondary_nap
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
        /*
         * Reload DEC.  HDEC interrupts were disabled when
        lwz     r6, HSTATE_PMC + 12(r13)
        lwz     r8, HSTATE_PMC + 16(r13)
        lwz     r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+       lwz     r10, HSTATE_PMC + 24(r13)
+       lwz     r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        mtspr   SPRN_PMC1, r3
        mtspr   SPRN_PMC2, r4
        mtspr   SPRN_PMC3, r5
        mtspr   SPRN_PMC4, r6
        mtspr   SPRN_PMC5, r8
        mtspr   SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+       mtspr   SPRN_PMC7, r10
+       mtspr   SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        ld      r3, HSTATE_MMCR(r13)
        ld      r4, HSTATE_MMCR + 8(r13)
        ld      r5, HSTATE_MMCR + 16(r13)
        cmpwi   r12, BOOK3S_INTERRUPT_MACHINE_CHECK
 
        /* RFI into the highmem handler, or branch to interrupt handler */
-       mfmsr   r6
+12:    mfmsr   r6
        mtctr   r12
        li      r0, MSR_RI
        andc    r6, r6, r0
        beqctr
        RFI
 
-11:    mtspr   SPRN_HSRR0, r8
+11:
+BEGIN_FTR_SECTION
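+       /* PPC970 has no HSRR0/HSRR1; take the ordinary MSR/RFI path instead */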
+       b       12b
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+       mtspr   SPRN_HSRR0, r8
        mtspr   SPRN_HSRR1, r7
        ba      0x500
 
 
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
+               /* PPC970 requires an RMA */
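+               /* (2 => an RMA is required, 1 => merely supported) */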
+               if (cpu_has_feature(CPU_FTR_ARCH_201))
+                       r = 2;
                break;
 #endif
        default:
 
 
 #define HPTE_LOCK_BIT 3
 
-static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
+DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
 static inline void __tlbie(unsigned long va, int psize, int ssize)
 {