 	vcpu->arch.guest_context.sstatus |= SR_SPP;
 }
 
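+/*
+ * Redirect the trap to the guest when it was taken from VS/VU-mode
+ * (hstatus.SPV set) and return 1 so the vCPU re-enters the guest;
+ * otherwise return -EFAULT so the caller reports the exit as an error.
+ */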
+static inline int vcpu_redirect(struct kvm_vcpu *vcpu, struct kvm_cpu_trap *trap)
+{
+       int ret = -EFAULT;
+
+       if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
+               kvm_riscv_vcpu_trap_redirect(vcpu, trap);
+               ret = 1;
+       }
+       return ret;
+}
+
 /*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
        run->exit_reason = KVM_EXIT_UNKNOWN;
        switch (trap->scause) {
        case EXC_INST_ILLEGAL:
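+		/* Count the SBI PMU firmware event before redirecting */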
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN);
+               ret = vcpu_redirect(vcpu, trap);
+               break;
        case EXC_LOAD_MISALIGNED:
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_LOAD);
+               ret = vcpu_redirect(vcpu, trap);
+               break;
        case EXC_STORE_MISALIGNED:
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_STORE);
+               ret = vcpu_redirect(vcpu, trap);
+               break;
        case EXC_LOAD_ACCESS:
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_LOAD);
+               ret = vcpu_redirect(vcpu, trap);
+               break;
        case EXC_STORE_ACCESS:
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_STORE);
+               ret = vcpu_redirect(vcpu, trap);
+               break;
        case EXC_INST_ACCESS:
-               if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
-                       kvm_riscv_vcpu_trap_redirect(vcpu, trap);
-                       ret = 1;
-               }
+               ret = vcpu_redirect(vcpu, trap);
                break;
        case EXC_VIRTUAL_INST_FAULT:
                if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)