#define __KVM_VCPU_RISCV_PMU_H
 
 #include <linux/perf/riscv_pmu.h>
+#include <asm/kvm_vcpu_insn.h>
 #include <asm/sbi.h>
 
 #ifdef CONFIG_RISCV_PMU_SBI
+/*
+ * NOTE(review): kvm_pmu carries no state in this branch of the header —
+ * confirm against the full file which CONFIG_RISCV_PMU_SBI branch this
+ * empty definition belongs to.
+ */
 struct kvm_pmu {
 };
 
+/*
+ * Dummy read handler for the hardware counter CSRs when no real PMU
+ * virtualization is available: guest reads of CSR_CYCLE and CSR_INSTRET
+ * yield 0 and execution resumes at the instruction after the trapping
+ * one (KVM_INSN_CONTINUE_NEXT_SEPC); any other CSR routed here is
+ * reflected back to the guest as an illegal-instruction trap.
+ * new_val/wr_mask are accepted for signature compatibility but ignored,
+ * so writes through this handler have no effect.
+ */
+static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
+                                                unsigned long *val, unsigned long new_val,
+                                                unsigned long wr_mask)
+{
+       if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
+               *val = 0;
+               return KVM_INSN_CONTINUE_NEXT_SEPC;
+       } else {
+               return KVM_INSN_ILLEGAL_TRAP;
+       }
+}
+
+/*
+ * CSR-function table entry covering 3 consecutive CSRs starting at
+ * CSR_CYCLE, all dispatched to kvm_riscv_vcpu_pmu_read_legacy.  Note
+ * the handler itself only services CSR_CYCLE and CSR_INSTRET; the CSR
+ * between them in this range gets KVM_INSN_ILLEGAL_TRAP from it.
+ */
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = 0,    .count = 0,     .func = NULL },
+{.base = CSR_CYCLE,    .count = 3,     .func = kvm_riscv_vcpu_pmu_read_legacy },
 
+/* Stub: nothing to initialize for the vcpu PMU in this configuration. */
 static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
 static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)