struct bp_patching_desc {
struct text_poke_loc *vec;
int nr_entries;
- atomic_t refs;
};
+static DEFINE_PER_CPU(atomic_t, bp_refs);
+
static struct bp_patching_desc bp_desc;
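What gets removed here is the single shared refcount: with one global atomic_t, every CPU that traps on an INT3 while patching is in flight bounces the same cacheline when taking and dropping its reference. Splitting the count into a DEFINE_PER_CPU() array keeps the handler's fast path CPU-local; only the patcher ever walks all copies. A minimal sketch of that split (example_refs and the helpers are made-up names, not code from the patch):

	static DEFINE_PER_CPU(atomic_t, example_refs);

	/*
	 * Reader fast path: touches only the calling CPU's copy.
	 * Assumes preemption is disabled, as it is in the INT3
	 * exception context.
	 */
	static bool example_try_get(void)
	{
		return atomic_inc_not_zero(this_cpu_ptr(&example_refs));
	}

	/* Writer slow path: walk every possible CPU's copy. */
	static void example_set_all(int val)
	{
		int i;

		for_each_possible_cpu(i)
			atomic_set(per_cpu_ptr(&example_refs, i), val);
	}
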
static __always_inline
struct bp_patching_desc *try_get_desc(void)
{
- struct bp_patching_desc *desc = &bp_desc;
+ atomic_t *refs = this_cpu_ptr(&bp_refs);
- if (!raw_atomic_inc_not_zero(&desc->refs))
+ if (!raw_atomic_inc_not_zero(refs))
return NULL;
- return desc;
+ return &bp_desc;
}
static __always_inline void put_desc(void)
{
- struct bp_patching_desc *desc = &bp_desc;
+ atomic_t *refs = this_cpu_ptr(&bp_refs);
smp_mb__before_atomic();
- raw_atomic_dec(&desc->refs);
+ raw_atomic_dec(refs);
}
static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
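try_get_desc() and put_desc() bracket every access the INT3 handler makes to bp_desc. The inc_not_zero fails once teardown has begun, and the smp_mb__before_atomic() orders the reader's loads from bp_desc before the decrement, pairing with the acquire in the patcher's wait further down. Since the handler runs in exception context it cannot migrate, so put_desc() decrements the same per-CPU counter that try_get_desc() incremented. A hedged sketch of the reader-side shape (the real poke_int3_handler() additionally looks the trapping address up in desc->vec and emulates the target instruction; example_handler is an illustrative name):

	static int example_handler(void)
	{
		struct bp_patching_desc *desc = try_get_desc();

		if (!desc)
			return 0;	/* teardown done: not our INT3 */

		/*
		 * desc->vec and desc->nr_entries are stable for as
		 * long as the reference is held.
		 */

		put_desc();
		return 1;
	}
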
* Having observed our INT3 instruction, we now must observe
* bp_desc with non-zero refcount:
*
- *	bp_desc.refs = 1	INT3
- *	WMB			RMB
- *	write INT3		if (bp_desc.refs != 0)
+ *	bp_refs = 1		INT3
+ *	WMB			RMB
+ *	write INT3		if (bp_refs != 0)
*/
smp_rmb();
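The diagram is a publish/observe pairing: the writer must make the non-zero refcounts visible before the INT3 byte, and a CPU that traps on the INT3 must not read a stale zero. Spelled out as a sketch, with explicit smp_wmb()/smp_rmb() standing in for the diagram's barriers (example_write_int3() is a hypothetical stand-in for the real text_poke machinery):

	static void example_write_int3(void *addr);	/* hypothetical */

	static void example_writer(void *addr)
	{
		int i;

		for_each_possible_cpu(i)
			atomic_set_release(per_cpu_ptr(&bp_refs, i), 1);

		smp_wmb();			/* the diagram's WMB */
		example_write_int3(addr);	/* plant the trap */
	}

	static bool example_observer(void)
	{
		/*
		 * Reached only after trapping on the INT3, i.e. the
		 * writer's INT3 store has already been observed.
		 */
		smp_rmb();			/* the diagram's RMB */
		return atomic_inc_not_zero(this_cpu_ptr(&bp_refs));
	}

If the trap was taken, the WMB/RMB pair guarantees the refcount read cannot return the pre-publication zero.
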
* Corresponds to the implicit memory barrier in try_get_desc() to
* ensure reading a non-zero refcount provides up to date bp_desc data.
*/
- atomic_set_release(&bp_desc.refs, 1);
+ for_each_possible_cpu(i)
+ atomic_set_release(per_cpu_ptr(&bp_refs, i), 1);
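The _release ordering is what lets the counters double as the publish flag: text_poke_bp_batch() fills in bp_desc.vec and bp_desc.nr_entries just before this loop, and the release store keeps those writes from being reordered past any CPU's refs becoming 1. Iterating the possible rather than the online mask presumably also covers CPUs that come up while patching is in flight. The publish step in isolation (example_publish() is a made-up wrapper):

	static void example_publish(struct text_poke_loc *vec, int nr)
	{
		int i;

		bp_desc.vec = vec;
		bp_desc.nr_entries = nr;

		/* Order the bp_desc stores above before each refs = 1. */
		for_each_possible_cpu(i)
			atomic_set_release(per_cpu_ptr(&bp_refs, i), 1);
	}
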
/*
* Function tracing can enable thousands of places that need to be
/*
* Remove and wait for refs to be zero.
*/
- if (!atomic_dec_and_test(&bp_desc.refs))
- atomic_cond_read_acquire(&bp_desc.refs, !VAL);
+ for_each_possible_cpu(i) {
+ atomic_t *refs = per_cpu_ptr(&bp_refs, i);
+
+ if (unlikely(!atomic_dec_and_test(refs)))
+ atomic_cond_read_acquire(refs, !VAL);
+ }
}
static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
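The teardown is the usual dec-and-wait idiom, now run once per CPU. Each counter was initialized to 1, i.e. the patcher holds the only long-lived reference; atomic_dec_and_test() drops it, and the wait is only taken on CPUs where an INT3 handler still holds a transient reference. Inside atomic_cond_read_acquire(), VAL is the macro's name for the freshly reloaded value, so the condition !VAL spins until the counter hits zero, and the acquire ordering makes everything after the loop happen after the last reader's put_desc(). The idiom in isolation (example_unpublish() is a made-up name):

	static void example_unpublish(atomic_t *refs)
	{
		/* Drop the patcher's own reference (the initial 1). */
		if (!atomic_dec_and_test(refs))
			/* A reader still holds one: wait for put_desc(). */
			atomic_cond_read_acquire(refs, !VAL);
	}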