The find_arch_event() callback returns an "unsigned int" value, which
is used by pmc_reprogram_counter() to program a PERF_TYPE_HARDWARE type
perf_event. The returned value is actually the kernel-defined generic
perf_hw_id; rename the callback to pmc_perf_hw_id() and simplify its
parameters (it now takes only the kvm_pmc) for better self-explanation.

Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20211130074221.93635-3-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
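
For context, the perf_hw_id returned by the new callback is only
meaningful as the .config of a PERF_TYPE_HARDWARE perf_event. The
snippet below is a minimal sketch of that consumption path, assuming
the usual pmc_reprogram_counter()/perf_event_create_kernel_counter()
flow; the function name and the reduced attr setup are illustrative,
not part of this patch.

/*
 * Minimal sketch (assumption, not this patch's code): how the generic
 * perf_hw_id ("config") is consumed when the backing perf_event is
 * created.  Most attr fields and error handling are omitted.
 */
#include <linux/perf_event.h>
#include <linux/sched.h>
#include <linux/err.h>

static void example_reprogram(struct kvm_pmc *pmc, u32 type, u64 config)
{
	struct perf_event_attr attr = {
		.type = type,		/* PERF_TYPE_HARDWARE when a generic id was found */
		.size = sizeof(attr),
		.config = config,	/* the perf_hw_id, e.g. PERF_COUNT_HW_INSTRUCTIONS */
	};
	struct perf_event *event;

	/* In-kernel perf API used by KVM to back a guest counter. */
	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event))
		return;

	pmc->perf_event = event;
}
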
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
        unsigned config, type = PERF_TYPE_RAW;
-       u8 event_select, unit_mask;
        struct kvm *kvm = pmc->vcpu->kvm;
        struct kvm_pmu_event_filter *filter;
        int i;
        if (!allow_event)
                return;
 
-       event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-       unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-
        if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
                          ARCH_PERFMON_EVENTSEL_INV |
                          ARCH_PERFMON_EVENTSEL_CMASK |
                          HSW_IN_TX |
                          HSW_IN_TX_CHECKPOINTED))) {
-               config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
-                                                     event_select,
-                                                     unit_mask);
+               config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
                if (config != PERF_COUNT_HW_MAX)
                        type = PERF_TYPE_HARDWARE;
        }
 
 };
 
 struct kvm_pmu_ops {
-       unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
-                                   u8 unit_mask);
+       unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
        unsigned (*find_fixed_event)(int idx);
        bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
        struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
 
        return &pmu->gp_counters[msr_to_index(msr)];
 }
 
-static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
-                                   u8 event_select,
-                                   u8 unit_mask)
+static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+       u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+       u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        int i;
 
        for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
 }
 
 struct kvm_pmu_ops amd_pmu_ops = {
-       .find_arch_event = amd_find_arch_event,
+       .pmc_perf_hw_id = amd_pmc_perf_hw_id,
        .find_fixed_event = amd_find_fixed_event,
        .pmc_is_enabled = amd_pmc_is_enabled,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
 
                reprogram_counter(pmu, bit);
 }
 
-static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
-                                     u8 event_select,
-                                     u8 unit_mask)
+static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+       u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+       u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        int i;
 
        for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
 }
 
 struct kvm_pmu_ops intel_pmu_ops = {
-       .find_arch_event = intel_find_arch_event,
+       .pmc_perf_hw_id = intel_pmc_perf_hw_id,
        .find_fixed_event = intel_find_fixed_event,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
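
The lookup loops above are truncated in this excerpt. As a rough guide
only, each vendor callback scans a small eventsel/unit_mask mapping
table and returns PERF_COUNT_HW_MAX when no generic event matches,
which makes the caller stay on PERF_TYPE_RAW. The table name and
entries below are hypothetical examples in the shape of struct
kvm_event_hw_type_mapping, not code taken from this patch.

/*
 * Hedged sketch of the table walk performed by the new callbacks.
 * "example_event_mapping" and its entries are illustrative only.
 */
static struct kvm_event_hw_type_mapping example_event_mapping[] = {
	{ 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },	/* retired instructions */
	{ 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },	/* unhalted core cycles */
};

static unsigned int example_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(example_event_mapping); i++)
		if (example_event_mapping[i].eventsel == event_select &&
		    example_event_mapping[i].unit_mask == unit_mask)
			break;

	/* No generic event found: tell the caller to keep PERF_TYPE_RAW. */
	if (i == ARRAY_SIZE(example_event_mapping))
		return PERF_COUNT_HW_MAX;

	return example_event_mapping[i].event_type;
}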