   PPC   | KVM_REG_PPC_ACOP     | 64
   PPC   | KVM_REG_PPC_VRSAVE   | 32
   PPC   | KVM_REG_PPC_LPCR     | 64
+  PPC   | KVM_REG_PPC_PPR      | 64
   PPC   | KVM_REG_PPC_TM_GPR0  | 64
           ...
   PPC   | KVM_REG_PPC_TM_GPR31 | 64
 
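For reference, userspace reaches the new register through the generic
ONE_REG interface (KVM_GET_ONE_REG/KVM_SET_ONE_REG on a vcpu fd). A
minimal sketch, assuming vcpu_fd is an already-created vcpu file
descriptor and headers that export KVM_REG_PPC_PPR; the get_ppr/set_ppr
helper names are made up here:

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        /* Read the guest PPR into *ppr; returns the ioctl result. */
        static int get_ppr(int vcpu_fd, uint64_t *ppr)
        {
                struct kvm_one_reg reg = {
                        .id   = KVM_REG_PPC_PPR,
                        .addr = (uintptr_t)ppr,
                };

                return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        }

        /* Write the guest PPR. */
        static int set_ppr(int vcpu_fd, uint64_t ppr)
        {
                struct kvm_one_reg reg = {
                        .id   = KVM_REG_PPC_PPR,
                        .addr = (uintptr_t)&ppr,
                };

                return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
        }
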
        ld      r10,area+EX_CFAR(r13);                                  \
        std     r10,HSTATE_CFAR(r13);                                   \
        END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);          \
+       BEGIN_FTR_SECTION_NESTED(948)                                   \
+       ld      r10,area+EX_PPR(r13);                                   \
+       std     r10,HSTATE_PPR(r13);                                    \
+       END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);    \
        ld      r10,area+EX_R10(r13);                                   \
        stw     r9,HSTATE_SCRATCH1(r13);                                \
        ld      r9,area+EX_R9(r13);                                     \

        ld      r10,area+EX_R10(r13);                                   \
        beq     89f;                                                    \
        stw     r9,HSTATE_SCRATCH1(r13);                                \
+       BEGIN_FTR_SECTION_NESTED(948)                                   \
+       ld      r9,area+EX_PPR(r13);                                    \
+       std     r9,HSTATE_PPR(r13);                                     \
+       END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);    \
        ld      r9,area+EX_R9(r13);                                     \
        std     r12,HSTATE_SCRATCH0(r13);                               \
        li      r12,n;                                                  \
 
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
        u64 cfar;
+       u64 ppr;
 #endif
 };
 
 
        u32 ctrl;
        ulong dabr;
        ulong cfar;
+       ulong ppr;
 #endif
        u32 vrsave; /* also USPRG0 */
        u32 mmucr;
 
 
 #define KVM_REG_PPC_VRSAVE     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
 #define KVM_REG_PPC_LPCR       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_PPR        (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
 
        DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
        DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
        DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
+       DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
        DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
        DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
        DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 
 #ifdef CONFIG_PPC_BOOK3S_64
        HSTATE_FIELD(HSTATE_CFAR, cfar);
+       HSTATE_FIELD(HSTATE_PPR, ppr);
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #else /* CONFIG_PPC_BOOK3S */
 
        case KVM_REG_PPC_LPCR:
                *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
                break;
+       case KVM_REG_PPC_PPR:
+               *val = get_reg_val(id, vcpu->arch.ppr);
+               break;
        default:
                r = -EINVAL;
                break;

        case KVM_REG_PPC_LPCR:
                kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
                break;
+       case KVM_REG_PPC_PPR:
+               vcpu->arch.ppr = set_reg_val(id, *val);
+               break;
        default:
                r = -EINVAL;
                break;
 
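With the one_reg handlers above wired up, the helpers sketched earlier
should round-trip the value, e.g. when saving and restoring vcpu state
for migration (fragment only; perror needs stdio.h, error handling
kept minimal):

        uint64_t ppr;

        if (get_ppr(vcpu_fd, &ppr))
                perror("KVM_GET_ONE_REG(PPR)"); /* fails on kernels without this patch */
        else if (set_ppr(vcpu_fd, ppr))         /* write the same value back */
                perror("KVM_SET_ONE_REG(PPR)");
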
        ld      r5, VCPU_CFAR(r4)
        mtspr   SPRN_CFAR, r5
 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+       ld      r0, VCPU_PPR(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
        ld      r5, VCPU_LR(r4)
        lwz     r6, VCPU_CR(r4)
        mtlr    r5
        mtcr    r6
 
-       ld      r0, VCPU_GPR(R0)(r4)
        ld      r1, VCPU_GPR(R1)(r4)
        ld      r2, VCPU_GPR(R2)(r4)
        ld      r3, VCPU_GPR(R3)(r4)

        ld      r12, VCPU_GPR(R12)(r4)
        ld      r13, VCPU_GPR(R13)(r4)
 
+BEGIN_FTR_SECTION
+       mtspr   SPRN_PPR, r0
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+       ld      r0, VCPU_GPR(R0)(r4)
        ld      r4, VCPU_GPR(R4)(r4)
 
        hrfid

        ld      r3, HSTATE_CFAR(r13)
        std     r3, VCPU_CFAR(r9)
 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+       ld      r4, HSTATE_PPR(r13)
+       std     r4, VCPU_PPR(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
        /* Restore R1/R2 so we can handle faults */
        ld      r1, HSTATE_HOST_R1(r13)