x86/alternatives: Rename 'text_poke_sync()' to 'smp_text_poke_sync_each_cpu()'
author     Ingo Molnar <mingo@kernel.org>
           Fri, 11 Apr 2025 05:40:52 +0000 (07:40 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Fri, 11 Apr 2025 09:01:34 +0000 (11:01 +0200)
Unlike sync_core(), text_poke_sync() is a very heavy operation, as
it sends an IPI to every online CPU in the system and waits for
completion.

Reflect this in the name.
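
For reference, the asymmetry is visible directly in the implementation;
a minimal sketch mirroring the patched code in
arch/x86/kernel/alternative.c, with the relevant headers spelled out:

    #include <linux/smp.h>          /* on_each_cpu() */
    #include <asm/sync_core.h>      /* sync_core()   */

    /* Per-CPU serialization: cheap, only affects the local CPU. */
    static void do_sync_core(void *info)
    {
            sync_core();
    }

    /* Global serialization: IPIs every online CPU and, because the
     * third argument (wait) is 1, blocks until every CPU has
     * executed sync_core(). */
    void smp_text_poke_sync_each_cpu(void)
    {
            on_each_cpu(do_sync_core, NULL, 1);
    }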

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250411054105.2341982-41-mingo@kernel.org
arch/x86/include/asm/text-patching.h
arch/x86/kernel/alternative.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/module.c

diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index f3c9b70afb0c8385c3eae6758f9ac0bca20772c0..d9dbbe9d9667976b97632522ce12153afb6f527d 100644
@@ -32,7 +32,7 @@ extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u
  * an inconsistent instruction while you patch.
  */
 extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern void text_poke_sync(void);
+extern void smp_text_poke_sync_each_cpu(void);
 extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
 extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
 #define text_poke_copy text_poke_copy
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 556a82f576cd24b4d8f0632bbc2365ce0d016213..e4c51d81a72febb6e0fe71551e51377dfc9c2c6b 100644
@@ -2445,7 +2445,7 @@ static void do_sync_core(void *info)
        sync_core();
 }
 
-void text_poke_sync(void)
+void smp_text_poke_sync_each_cpu(void)
 {
        on_each_cpu(do_sync_core, NULL, 1);
 }
@@ -2469,8 +2469,8 @@ struct smp_text_poke_loc {
 #define TP_ARRAY_NR_ENTRIES_MAX (PAGE_SIZE / sizeof(struct smp_text_poke_loc))
 
 static struct smp_text_poke_array {
-       int nr_entries;
        struct smp_text_poke_loc vec[TP_ARRAY_NR_ENTRIES_MAX];
+       int nr_entries;
 } text_poke_array;
 
 static DEFINE_PER_CPU(atomic_t, text_poke_array_refs);
@@ -2649,7 +2649,7 @@ static void smp_text_poke_batch_process(void)
                text_poke(text_poke_addr(&text_poke_array.vec[i]), &int3, INT3_INSN_SIZE);
        }
 
-       text_poke_sync();
+       smp_text_poke_sync_each_cpu();
 
        /*
         * Second step: update all but the first byte of the patched range.
@@ -2711,7 +2711,7 @@ static void smp_text_poke_batch_process(void)
                 * not necessary and we'd be safe even without it. But
                 * better safe than sorry (plus there's not only Intel).
                 */
-               text_poke_sync();
+               smp_text_poke_sync_each_cpu();
        }
 
        /*
@@ -2732,13 +2732,13 @@ static void smp_text_poke_batch_process(void)
        }
 
        if (do_sync)
-               text_poke_sync();
+               smp_text_poke_sync_each_cpu();
 
        /*
         * Remove and wait for refs to be zero.
         *
         * Notably, if after step-3 above the INT3 got removed, then the
-        * text_poke_sync() will have serialized against any running INT3
+        * smp_text_poke_sync_each_cpu() will have serialized against any running INT3
         * handlers and the below spin-wait will not happen.
         *
         * IOW. unless the replacement instruction is INT3, this case goes
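
Taken together, the three renamed call sites in this file mark the
synchronization points of the INT3 batching protocol. A condensed
sketch of that sequence, simplified from smp_text_poke_batch_process()
(the text_poke_array_refs handoff, INT3-emulation bookkeeping and
error handling are omitted; the .len/.text member names follow the
upstream struct):

    u8 int3 = INT3_INSN_OPCODE;
    int i;

    /* Step 1: plant INT3 in the first byte of every patch site, so
     * that no CPU can execute a half-written instruction there. */
    for (i = 0; i < text_poke_array.nr_entries; i++)
            text_poke(text_poke_addr(&text_poke_array.vec[i]),
                      &int3, INT3_INSN_SIZE);
    smp_text_poke_sync_each_cpu();

    /* Step 2: write all but the first byte of each replacement; the
     * INT3 fence keeps concurrent CPUs off the torn bytes. */
    for (i = 0; i < text_poke_array.nr_entries; i++) {
            struct smp_text_poke_loc *tp = &text_poke_array.vec[i];

            if (tp->len > INT3_INSN_SIZE)
                    text_poke(text_poke_addr(tp) + INT3_INSN_SIZE,
                              tp->text + INT3_INSN_SIZE,
                              tp->len - INT3_INSN_SIZE);
    }
    smp_text_poke_sync_each_cpu();

    /* Step 3: restore the first byte, atomically switching every
     * site over to the complete new instruction. */
    for (i = 0; i < text_poke_array.nr_entries; i++)
            text_poke(text_poke_addr(&text_poke_array.vec[i]),
                      text_poke_array.vec[i].text, INT3_INSN_SIZE);
    smp_text_poke_sync_each_cpu();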
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 09608fd936876b51ea9507a3a76a1a8e4f59541a..47cb8eb138ba6ed1f1870d512ae153c0b60229c1 100644
@@ -808,7 +808,7 @@ void arch_arm_kprobe(struct kprobe *p)
        u8 int3 = INT3_INSN_OPCODE;
 
        text_poke(p->addr, &int3, 1);
-       text_poke_sync();
+       smp_text_poke_sync_each_cpu();
        perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
 }
 
@@ -818,7 +818,7 @@ void arch_disarm_kprobe(struct kprobe *p)
 
        perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
        text_poke(p->addr, &p->opcode, 1);
-       text_poke_sync();
+       smp_text_poke_sync_each_cpu();
 }
 
 void arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 9307a40f4983401887a9d24123388aac3cd3ddfe..0aabd4c4e2c4f6c4eec3fe0e176fe93e46daef7a 100644
@@ -513,11 +513,11 @@ void arch_unoptimize_kprobe(struct optimized_kprobe *op)
               JMP32_INSN_SIZE - INT3_INSN_SIZE);
 
        text_poke(addr, new, INT3_INSN_SIZE);
-       text_poke_sync();
+       smp_text_poke_sync_each_cpu();
        text_poke(addr + INT3_INSN_SIZE,
                  new + INT3_INSN_SIZE,
                  JMP32_INSN_SIZE - INT3_INSN_SIZE);
-       text_poke_sync();
+       smp_text_poke_sync_each_cpu();
 
        perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
 }
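
The two syncs in this hunk bracket a strict byte-ordering invariant
when a 5-byte JMP32 is torn down. A minimal annotated sketch of the
surrounding function body, with the buffer setup reconstructed from
the upstream arch_unoptimize_kprobe() (op->optinsn.copied_insn holds
the original instruction bytes the JMP32 had displaced):

    u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
    u8 *addr = op->kp.addr;

    /* The tail of the buffer is the displaced original bytes. */
    memcpy(new + INT3_INSN_SIZE, op->optinsn.copied_insn,
           JMP32_INSN_SIZE - INT3_INSN_SIZE);

    /* 1) Fence the site: a CPU reaching it now traps on INT3 instead
     *    of executing a half-rewritten JMP32. */
    text_poke(addr, new, INT3_INSN_SIZE);
    smp_text_poke_sync_each_cpu();

    /* 2) With the fence globally visible, the remaining four bytes
     *    can be restored, then published with a second sync. */
    text_poke(addr + INT3_INSN_SIZE, new + INT3_INSN_SIZE,
              JMP32_INSN_SIZE - INT3_INSN_SIZE);
    smp_text_poke_sync_each_cpu();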
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index a7998f35170175cd75d17a1f3a5fc9d3761e56ba..231d6326d1fde6556f14d639b71bbe8f6c87242e 100644
@@ -206,7 +206,7 @@ static int write_relocate_add(Elf64_Shdr *sechdrs,
                                   write, apply);
 
        if (!early) {
-               text_poke_sync();
+               smp_text_poke_sync_each_cpu();
                mutex_unlock(&text_mutex);
        }