perf/x86: Fix smp_processor_id()-in-preemptible warnings
author Li Huafei <lihuafei1@huawei.com>
Mon, 29 Jul 2024 22:09:28 +0000 (06:09 +0800)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 31 Jul 2024 10:57:39 +0000 (12:57 +0200)
The following bug was triggered on a system built with
CONFIG_DEBUG_PREEMPT=y:

 # echo p > /proc/sysrq-trigger

 BUG: using smp_processor_id() in preemptible [00000000] code: sh/117
 caller is perf_event_print_debug+0x1a/0x4c0
 CPU: 3 UID: 0 PID: 117 Comm: sh Not tainted 6.11.0-rc1 #109
 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
 Call Trace:
  <TASK>
  dump_stack_lvl+0x4f/0x60
  check_preemption_disabled+0xc8/0xd0
  perf_event_print_debug+0x1a/0x4c0
  __handle_sysrq+0x140/0x180
  write_sysrq_trigger+0x61/0x70
  proc_reg_write+0x4e/0x70
  vfs_write+0xd0/0x430
  ? handle_mm_fault+0xc8/0x240
  ksys_write+0x9c/0xd0
  do_syscall_64+0x96/0x190
  entry_SYSCALL_64_after_hwframe+0x4b/0x53

This is because commit d4b294bf84db ("perf/x86: Hybrid PMU support
for counters") moved the smp_processor_id() call outside of the IRQ
critical section. If preemption occurs in perf_event_print_debug() and
the task is migrated to another CPU, we may get incorrect PMU debug
information. Move smp_processor_id() back inside the IRQ critical
section to fix this issue.
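
A minimal sketch of the ordering problem (condensed from the hunk
below; the identifiers are the ones used by the patch, but this is an
illustration, not a drop-in replacement):

  /* Before: the per-CPU pointer is resolved while still preemptible. */
  int cpu = smp_processor_id();            /* task can still migrate ...  */
  struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
  local_irq_save(flags);                   /* ... until IRQs go off here  */

  /* After: disable IRQs (and with them preemption) first. */
  guard(irqsave)();                        /* local_irq_save() for this scope */
  cpu = smp_processor_id();                /* CPU number is now stable    */
  cpuc = &per_cpu(cpu_hw_events, cpu);

guard(irqsave)() is the kernel's scope-based counterpart to
local_irq_save(): interrupts are restored automatically when the scope
is left, which is why the explicit local_irq_restore() at the end of
the function can be dropped as well.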

Fixes: d4b294bf84db ("perf/x86: Hybrid PMU support for counters")
Signed-off-by: Li Huafei <lihuafei1@huawei.com>
Reviewed-and-tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20240729220928.325449-1-lihuafei1@huawei.com
arch/x86/events/core.c

index 12f2a0c14d33b436457b8edff459148797fc3f95..be01823b1bb4537932115cd0c2cfd51761e6eba9 100644
@@ -1520,20 +1520,23 @@ static void x86_pmu_start(struct perf_event *event, int flags)
 void perf_event_print_debug(void)
 {
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
+       unsigned long *cntr_mask, *fixed_cntr_mask;
+       struct event_constraint *pebs_constraints;
+       struct cpu_hw_events *cpuc;
        u64 pebs, debugctl;
-       int cpu = smp_processor_id();
-       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-       unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
-       unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
-       struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
-       unsigned long flags;
-       int idx;
+       int cpu, idx;
+
+       guard(irqsave)();
+
+       cpu = smp_processor_id();
+       cpuc = &per_cpu(cpu_hw_events, cpu);
+       cntr_mask = hybrid(cpuc->pmu, cntr_mask);
+       fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
+       pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
 
        if (!*(u64 *)cntr_mask)
                return;
 
-       local_irq_save(flags);
-
        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
@@ -1577,7 +1580,6 @@ void perf_event_print_debug(void)
                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
-       local_irq_restore(flags);
 }
 
 void x86_pmu_stop(struct perf_event *event, int flags)