        struct kvmhv_tb_accumulator *cur_activity;     /* What we're timing */
        u64     cur_tb_start;                   /* when it started */
 #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
-       struct kvmhv_tb_accumulator rm_entry;   /* real-mode entry code */
-       struct kvmhv_tb_accumulator rm_intr;    /* real-mode intr handling */
-       struct kvmhv_tb_accumulator rm_exit;    /* real-mode exit code */
-       struct kvmhv_tb_accumulator guest_time; /* guest execution */
-       struct kvmhv_tb_accumulator cede_time;  /* time napping inside guest */
+       struct kvmhv_tb_accumulator vcpu_entry;
+       struct kvmhv_tb_accumulator vcpu_exit;
+       struct kvmhv_tb_accumulator in_guest;
+       struct kvmhv_tb_accumulator hcall;
+       struct kvmhv_tb_accumulator pg_fault;
+       struct kvmhv_tb_accumulator guest_entry;
+       struct kvmhv_tb_accumulator guest_exit;
 #else
        struct kvmhv_tb_accumulator rm_entry;   /* real-mode entry code */
        struct kvmhv_tb_accumulator rm_intr;    /* real-mode intr handling */
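
For reference, the accumulator type behind these fields is defined elsewhere in the Book3S HV headers; roughly, each activity keeps a seqcount plus total/min/max timebase deltas, which is what the debugfs table below reports:

struct kvmhv_tb_accumulator {
	u64	seqcount;	/* used to synchronize access, also count * 2 */
	u64	tb_total;	/* total time spent in the activity */
	u64	tb_min;		/* min time */
	u64	tb_max;		/* max time */
};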
 
        size_t offset;
 } timings[] = {
 #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
-       {"rm_entry",    offsetof(struct kvm_vcpu, arch.rm_entry)},
-       {"rm_intr",     offsetof(struct kvm_vcpu, arch.rm_intr)},
-       {"rm_exit",     offsetof(struct kvm_vcpu, arch.rm_exit)},
-       {"guest",       offsetof(struct kvm_vcpu, arch.guest_time)},
-       {"cede",        offsetof(struct kvm_vcpu, arch.cede_time)},
+       {"vcpu_entry",  offsetof(struct kvm_vcpu, arch.vcpu_entry)},
+       {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)},
+       {"in_guest",    offsetof(struct kvm_vcpu, arch.in_guest)},
+       {"guest_exit",  offsetof(struct kvm_vcpu, arch.guest_exit)},
+       {"vcpu_exit",   offsetof(struct kvm_vcpu, arch.vcpu_exit)},
+       {"hypercall",   offsetof(struct kvm_vcpu, arch.hcall)},
+       {"page_fault",  offsetof(struct kvm_vcpu, arch.pg_fault)},
 #else
        {"rm_entry",    offsetof(struct kvm_vcpu, arch.rm_entry)},
        {"rm_intr",     offsetof(struct kvm_vcpu, arch.rm_intr)},
        mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
        mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
        switch_pmu_to_guest(vcpu, &host_os_sprs);
+       accumulate_time(vcpu, &vcpu->arch.in_guest);
        trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
                                  __pa(&vcpu->arch.regs));
+       accumulate_time(vcpu, &vcpu->arch.guest_exit);
        kvmhv_restore_hv_return_state(vcpu, &hvregs);
        switch_pmu_to_host(vcpu, &host_os_sprs);
        vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
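
The calls added above rely on accumulate_time() charging the time since the previous timing call to whatever was being timed, then switching to the named accumulator. A minimal sketch of that idea, assuming the helper behaves like the existing P8 timing code (the real version also updates tb_min/tb_max, applies the vcore timebase offset and bumps the seqcount):

/* Simplified sketch, not the in-tree implementation. */
static void accumulate_time_sketch(struct kvm_vcpu *vcpu,
				   struct kvmhv_tb_accumulator *next)
{
	struct kvmhv_tb_accumulator *prev = vcpu->arch.cur_activity;
	u64 tb = mftb();

	if (prev)
		prev->tb_total += tb - vcpu->arch.cur_tb_start;

	vcpu->arch.cur_activity = next;	/* what we're timing from now on */
	vcpu->arch.cur_tb_start = tb;
}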
        struct kvm *kvm;
        unsigned long msr;
 
+       start_timing(vcpu, &vcpu->arch.vcpu_entry);
+
        if (!vcpu->arch.sane) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
        do {
+               accumulate_time(vcpu, &vcpu->arch.guest_entry);
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
                                                  vcpu->arch.vcore->lpcr);
                else
                        r = kvmppc_run_vcpu(vcpu);
 
                if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
+                       accumulate_time(vcpu, &vcpu->arch.hcall);
+
                        if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
                                /*
                                 * These should have been caught reflected
                        trace_kvm_hcall_exit(vcpu, r);
                        kvmppc_core_prepare_to_enter(vcpu);
                } else if (r == RESUME_PAGE_FAULT) {
+                       accumulate_time(vcpu, &vcpu->arch.pg_fault);
                        srcu_idx = srcu_read_lock(&kvm->srcu);
                        r = kvmppc_book3s_hv_page_fault(vcpu,
                                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
                                r = kvmppc_xics_rm_complete(vcpu, 0);
                }
        } while (is_kvmppc_resume_guest(r));
+       accumulate_time(vcpu, &vcpu->arch.vcpu_exit);
 
        vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
        atomic_dec(&kvm->arch.vcpus_running);
 
        srr_regs_clobbered();
 
+       end_timing(vcpu);
+
        return r;
 }
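With the hunks above, one KVM_RUN pass is bracketed by start_timing()/end_timing() and carved into vcpu_entry, guest_entry, in_guest, guest_exit, hcall/pg_fault and vcpu_exit intervals. Sketches of the bracketing helpers, under the same assumptions as accumulate_time_sketch() above:

/* Simplified sketches, not the in-tree implementations. */
static void start_timing_sketch(struct kvm_vcpu *vcpu,
				struct kvmhv_tb_accumulator *first)
{
	vcpu->arch.cur_activity = first;
	vcpu->arch.cur_tb_start = mftb();
}

static void end_timing_sketch(struct kvm_vcpu *vcpu)
{
	/* close whatever interval is still open, then stop timing */
	accumulate_time_sketch(vcpu, NULL);
}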
 
 
        WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
        WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
 
-       start_timing(vcpu, &vcpu->arch.rm_entry);
-
        vcpu->arch.ceded = 0;
 
        /* Save MSR for restore, with EE clear. */
        mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
        mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
 
-       accumulate_time(vcpu, &vcpu->arch.guest_time);
-
        switch_pmu_to_guest(vcpu, &host_os_sprs);
+       accumulate_time(vcpu, &vcpu->arch.in_guest);
+
        kvmppc_p9_enter_guest(vcpu);
-       switch_pmu_to_host(vcpu, &host_os_sprs);
 
-       accumulate_time(vcpu, &vcpu->arch.rm_intr);
+       accumulate_time(vcpu, &vcpu->arch.guest_exit);
+       switch_pmu_to_host(vcpu, &host_os_sprs);
 
        /* XXX: Could get these from r11/12 and paca exsave instead */
        vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
 #endif
        }
 
-       accumulate_time(vcpu, &vcpu->arch.rm_exit);
-
        /* Advance host PURR/SPURR by the amount used by guest */
        purr = mfspr(SPRN_PURR);
        spurr = mfspr(SPRN_SPURR);
                asm volatile(PPC_CP_ABORT);
 
 out:
-       end_timing(vcpu);
-
        return trap;
 }
 EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);