From 01c7bc5198e9ef5628fad0346c5cdba2633a79da Mon Sep 17 00:00:00 2001
From: Brian Gerst <brgerst@gmail.com>
Date: Mon, 3 Mar 2025 11:52:39 -0500
Subject: [PATCH] x86/smp: Move cpu number to percpu hot section

No functional change.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-5-brgerst@gmail.com
---
 arch/x86/include/asm/current.h | 1 -
 arch/x86/include/asm/smp.h     | 7 ++++---
 arch/x86/kernel/setup_percpu.c | 5 ++++-
 kernel/bpf/verifier.c          | 4 ++--
 4 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 46a736d6f2ec..f988462d8b69 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -14,7 +14,6 @@ struct task_struct;
 
 struct pcpu_hot {
 	struct task_struct	*current_task;
-	int			cpu_number;
 #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
 	u64			call_depth;
 #endif
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 76d7c013cd08..bcfa00232d79 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -6,7 +6,8 @@
 #include <linux/cpumask.h>
 
 #include <asm/cpumask.h>
-#include <asm/current.h>
+
+DECLARE_PER_CPU_CACHE_HOT(int, cpu_number);
 
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
@@ -132,8 +133,8 @@ __visible void smp_call_function_single_interrupt(struct pt_regs *r);
  * This function is needed by all SMP systems. It must _always_ be valid
  * from the initial startup.
  */
-#define raw_smp_processor_id()  this_cpu_read(pcpu_hot.cpu_number)
-#define __smp_processor_id() __this_cpu_read(pcpu_hot.cpu_number)
+#define raw_smp_processor_id()  this_cpu_read(cpu_number)
+#define __smp_processor_id() __this_cpu_read(cpu_number)
 
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 {
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 1e7be9409aa2..175afc3ffb12 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -23,6 +23,9 @@
 #include <asm/cpumask.h>
 #include <asm/cpu.h>
 
+DEFINE_PER_CPU_CACHE_HOT(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
@@ -161,7 +164,7 @@ void __init setup_per_cpu_areas(void)
 	for_each_possible_cpu(cpu) {
 		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
-		per_cpu(pcpu_hot.cpu_number, cpu) = cpu;
+		per_cpu(cpu_number, cpu) = cpu;
 		setup_percpu_segment(cpu);
 		/*
 		 * Copy data used in early init routines from the
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f4859516b190..6e604caa870c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21702,12 +21702,12 @@ patch_map_ops_generic:
 		if (insn->imm == BPF_FUNC_get_smp_processor_id &&
 		    verifier_inlines_helper_call(env, insn->imm)) {
 			/* BPF_FUNC_get_smp_processor_id inlining is an
-			 * optimization, so if pcpu_hot.cpu_number is ever
+			 * optimization, so if cpu_number is ever
 			 * changed in some incompatible and hard to support
 			 * way, it's fine to back out this inlining logic
 			 */
 #ifdef CONFIG_SMP
-			insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
+			insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&cpu_number);
 			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
 			insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
 			cnt = 3;
-- 
2.49.0
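
[Editor's note, not part of the patch] A quick way to see the moved variable in use: the sketch below is a minimal, illustrative kernel module, assuming an x86-64 tree with this patch applied. The module name, messages, and MODULE_DESCRIPTION text are made up; it only exercises existing interfaces. After this patch, raw_smp_processor_id() expands to this_cpu_read(cpu_number), and per_cpu(cpu_number, cpu) holds the value seeded in setup_per_cpu_areas().

/* Illustrative sketch only (hypothetical "cpu_number_demo" module).
 * Reads the cache-hot per-CPU cpu_number variable both through the
 * smp_processor_id() fast path and directly via per_cpu().
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static int __init cpu_number_demo_init(void)
{
	int cpu;

	/* Fast path: raw_smp_processor_id() is now this_cpu_read(cpu_number). */
	pr_info("cpu_number_demo: loaded on CPU %d\n", raw_smp_processor_id());

	/* Cross-CPU view: setup_per_cpu_areas() seeded per_cpu(cpu_number, cpu) = cpu. */
	for_each_online_cpu(cpu)
		pr_info("cpu_number_demo: per_cpu(cpu_number, %d) = %d\n",
			cpu, per_cpu(cpu_number, cpu));

	return 0;
}

static void __exit cpu_number_demo_exit(void)
{
}

module_init(cpu_number_demo_init);
module_exit(cpu_number_demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustrative reader of the per-CPU cpu_number variable");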