__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
 }
 
+/**
+ * kvm_pmu_release_perf_event - remove the perf event
+ * @pmc: The PMU counter pointer
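+ *
+ * Disable and release the perf event attached to this counter, if any,
+ * and clear the counter's reference to it.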
+ */
+static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event) {
+               perf_event_disable(pmc->perf_event);
+               perf_event_release_kernel(pmc->perf_event);
+               pmc->perf_event = NULL;
+       }
+}
+
 /**
  * kvm_pmu_stop_counter - stop PMU counter
  * @pmc: The PMU counter pointer
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
                __vcpu_sys_reg(vcpu, reg) = counter;
-               perf_event_disable(pmc->perf_event);
-               perf_event_release_kernel(pmc->perf_event);
-               pmc->perf_event = NULL;
+               kvm_pmu_release_perf_event(pmc);
        }
 }
 
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
 
-       for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
-               struct kvm_pmc *pmc = &pmu->pmc[i];
-
-               if (pmc->perf_event) {
-                       perf_event_disable(pmc->perf_event);
-                       perf_event_release_kernel(pmc->perf_event);
-                       pmc->perf_event = NULL;
-               }
-       }
+       for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
+               kvm_pmu_release_perf_event(&pmu->pmc[i]);
 }
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)