x86/alternatives: Rename 'bp_refs' to 'text_poke_array_refs'
author    Ingo Molnar <mingo@kernel.org>
Fri, 11 Apr 2025 05:40:16 +0000 (07:40 +0200)
committer Ingo Molnar <mingo@kernel.org>
Fri, 11 Apr 2025 09:01:33 +0000 (11:01 +0200)
Make it clear that these reference counts lock access
to text_poke_array.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250411054105.2341982-5-mingo@kernel.org
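
For context, the pattern this rename documents is a per-CPU reference count that pins text_poke_array while the INT3 handler inspects it. Below is a minimal userspace sketch of the same idea, using C11 atomics in place of the kernel's raw_atomic_*() helpers; the try_get_desc()/put_desc() names mirror the kernel code, while everything else here (desc_data, the spin loop) is illustrative only, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refs;   /* one text_poke_array_refs slot */
static int desc_data;     /* stands in for bp_desc */

static bool try_get_desc(void)
{
        int old = atomic_load_explicit(&refs, memory_order_relaxed);

        /* inc_not_zero: take a reference only while the count is live */
        do {
                if (old == 0)
                        return false;
        } while (!atomic_compare_exchange_weak_explicit(&refs, &old, old + 1,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
        return true;
}

static void put_desc(void)
{
        /* release: our reads of desc_data complete before the drop */
        atomic_fetch_sub_explicit(&refs, 1, memory_order_release);
}

int main(void)
{
        /* writer: publish the descriptor, then mark it live with a
         * release store, as atomic_set_release() does in the kernel */
        desc_data = 42;
        atomic_store_explicit(&refs, 1, memory_order_release);

        /* reader: guarded access */
        if (try_get_desc()) {
                printf("desc_data = %d\n", desc_data);
                put_desc();
        }

        /* teardown: drop the initial reference, then wait for any
         * remaining readers before the descriptor goes away */
        if (atomic_fetch_sub_explicit(&refs, 1, memory_order_acq_rel) != 1)
                while (atomic_load_explicit(&refs, memory_order_acquire) != 0)
                        ;
        return 0;
}

The writer publishes the descriptor first and only then marks the count live with a release store; at teardown it drops its own reference and waits for the count to hit zero, which is what text_poke_bp_batch() does in the hunks below with atomic_dec_and_test() and atomic_cond_read_acquire().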
arch/x86/kernel/alternative.c

index 8edf7d3fd18400f1913c0af3a3350f71e4ed2041..9bd71c017cfde727509aa3d7569feb7f68779b89 100644
@@ -2476,14 +2476,14 @@ struct text_poke_int3_vec {
        int nr_entries;
 };
 
-static DEFINE_PER_CPU(atomic_t, bp_refs);
+static DEFINE_PER_CPU(atomic_t, text_poke_array_refs);
 
 static struct text_poke_int3_vec bp_desc;
 
 static __always_inline
 struct text_poke_int3_vec *try_get_desc(void)
 {
-       atomic_t *refs = this_cpu_ptr(&bp_refs);
+       atomic_t *refs = this_cpu_ptr(&text_poke_array_refs);
 
        if (!raw_atomic_inc_not_zero(refs))
                return NULL;
@@ -2493,7 +2493,7 @@ struct text_poke_int3_vec *try_get_desc(void)
 
 static __always_inline void put_desc(void)
 {
-       atomic_t *refs = this_cpu_ptr(&bp_refs);
+       atomic_t *refs = this_cpu_ptr(&text_poke_array_refs);
 
        smp_mb__before_atomic();
        raw_atomic_dec(refs);
@@ -2529,9 +2529,9 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
         * Having observed our INT3 instruction, we now must observe
         * bp_desc with non-zero refcount:
         *
-        *      bp_refs = 1             INT3
+        *      text_poke_array_refs = 1                INT3
         *      WMB                     RMB
-        *      write INT3              if (bp_refs != 0)
+        *      write INT3              if (text_poke_array_refs != 0)
         */
        smp_rmb();
 
@@ -2638,7 +2638,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
         * ensure reading a non-zero refcount provides up to date bp_desc data.
         */
        for_each_possible_cpu(i)
-               atomic_set_release(per_cpu_ptr(&bp_refs, i), 1);
+               atomic_set_release(per_cpu_ptr(&text_poke_array_refs, i), 1);
 
        /*
         * Function tracing can enable thousands of places that need to be
@@ -2760,7 +2760,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
         * unused.
         */
        for_each_possible_cpu(i) {
-               atomic_t *refs = per_cpu_ptr(&bp_refs, i);
+               atomic_t *refs = per_cpu_ptr(&text_poke_array_refs, i);
 
                if (unlikely(!atomic_dec_and_test(refs)))
                        atomic_cond_read_acquire(refs, !VAL);
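
As for what the updated comment in poke_int3_handler() is diagramming: the writer must make the non-zero refcount visible before planting the INT3 byte, so any CPU that hits the breakpoint is guaranteed to observe a live descriptor. Here is a sketch of that WMB/RMB pairing with C11 release/acquire, where the int3 flag is only a stand-in for the breakpoint byte in the instruction stream, not kernel API:

#include <stdatomic.h>
#include <assert.h>

static atomic_int refs;  /* text_poke_array_refs */
static atomic_int int3;  /* stands in for the INT3 byte in the text */

static void writer(void)
{
        atomic_store_explicit(&refs, 1, memory_order_relaxed);
        /* WMB: refs = 1 must be visible before the INT3 "byte" */
        atomic_store_explicit(&int3, 1, memory_order_release);
}

static void handler(void)
{
        if (!atomic_load_explicit(&int3, memory_order_acquire))
                return;  /* no INT3 observed, nothing to do */
        /* RMB: having observed the INT3, refs != 0 is guaranteed */
        assert(atomic_load_explicit(&refs, memory_order_relaxed) != 0);
}

int main(void)
{
        writer();
        handler();
        return 0;
}

Run sequentially the assertion is trivial; the point is that it also holds when writer() and handler() race on different CPUs, because the release store to int3 orders the earlier store to refs before it.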