};
struct text_poke_int3_vec {
- struct smp_text_poke_loc *vec;
int nr_entries;
+ struct smp_text_poke_loc *vec;
};
static DEFINE_PER_CPU(atomic_t, text_poke_array_refs);
static struct text_poke_int3_vec int3_vec;
+#define TP_ARRAY_NR_ENTRIES_MAX (PAGE_SIZE / sizeof(struct smp_text_poke_loc))
+
+static struct smp_text_poke_array {
+ int nr_entries;
+ struct smp_text_poke_loc vec[TP_ARRAY_NR_ENTRIES_MAX];
+} text_poke_array;
+
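The new smp_text_poke_array object replaces the separate tp_vec[]/tp_vec_nr globals with a single instance that carries both the pending entries and their count, sized so the vector fits in at most one page. A minimal user-space sketch of that sizing relationship follows; the smp_text_poke_loc fields below are illustrative stand-ins, not the kernel's exact layout:

/* Stand-alone model of the batch-array sizing; not kernel code. */
#include <stdio.h>

#define PAGE_SIZE 4096	/* assumes 4 KiB pages */

struct smp_text_poke_loc {	/* stand-in fields for illustration only */
	int rel_addr;
	int disp;
	unsigned char len;
	unsigned char opcode;
	unsigned char text[5];
	unsigned char old;
};

#define TP_ARRAY_NR_ENTRIES_MAX (PAGE_SIZE / sizeof(struct smp_text_poke_loc))

static struct {
	int nr_entries;
	struct smp_text_poke_loc vec[TP_ARRAY_NR_ENTRIES_MAX];
} text_poke_array;

int main(void)
{
	printf("max entries per batch: %zu\n", TP_ARRAY_NR_ENTRIES_MAX);
	printf("vec footprint:         %zu bytes\n", sizeof(text_poke_array.vec));
	return 0;
}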
static __always_inline
struct text_poke_int3_vec *try_get_desc(void)
{
	atomic_t *refs = this_cpu_ptr(&text_poke_array_refs);

	if (!raw_atomic_inc_not_zero(refs))
		return NULL;

	return &int3_vec;
}
-#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct smp_text_poke_loc))
-static struct smp_text_poke_loc tp_vec[TP_VEC_MAX];
-static int tp_vec_nr;
-
noinstr int smp_text_poke_int3_handler(struct pt_regs *regs)
{
	struct text_poke_int3_vec *desc;

	desc = try_get_desc();
	if (!desc)
		return 0;
- WARN_ON_ONCE(desc->vec != tp_vec);
+ WARN_ON_ONCE(desc->vec != text_poke_array.vec);
	/*
	 * Discount the INT3. See smp_text_poke_batch_process().
	 */
static void smp_text_poke_batch_process(struct smp_text_poke_loc *tp, unsigned int nr_entries)
{
	lockdep_assert_held(&text_mutex);
- WARN_ON_ONCE(tp != tp_vec);
- WARN_ON_ONCE(nr_entries != tp_vec_nr);
+ WARN_ON_ONCE(tp != text_poke_array.vec);
+ WARN_ON_ONCE(nr_entries != text_poke_array.nr_entries);
int3_vec.vec = tp;
int3_vec.nr_entries = nr_entries;
}
/*
- * We hard rely on the tp_vec being ordered; ensure this is so by flushing
+ * We hard rely on the text_poke_array.vec being ordered; ensure this is so by flushing
* early if needed.
*/
static bool text_poke_addr_ordered(void *addr)
{
	struct smp_text_poke_loc *tp;

	WARN_ON_ONCE(!addr);
- if (!tp_vec_nr)
+ if (!text_poke_array.nr_entries)
return true;
	/*
	 * If the last queued entry's address is above the address being
	 * added, the ordering is violated and we must first flush all
	 * pending patching requests:
	 */
- tp = &tp_vec[tp_vec_nr-1];
+ tp = &text_poke_array.vec[text_poke_array.nr_entries-1];
if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
return false;
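The ordering requirement documented above is enforced here: a new address may only be appended if it is not below the last queued entry, and smp_text_poke_batch_flush() below drains the array first whenever the batch is full or the order would break (plausibly so the INT3 handler can search the vector by address). A self-contained toy model of that append-or-flush rule, using placeholder names (queue_add(), flush()) rather than kernel APIs:

/* Toy model of the ordered-append rule; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ENTRIES 4

static unsigned long queued[MAX_ENTRIES];
static int nr_queued;

static bool addr_ordered(unsigned long addr)
{
	if (!nr_queued)
		return true;
	/* the new address must not fall below the last queued one */
	return queued[nr_queued - 1] <= addr;
}

static void flush(void)
{
	printf("flushing %d pending entries\n", nr_queued);
	nr_queued = 0;
}

static void queue_add(unsigned long addr)
{
	/* mirrors the full-or-unordered check in smp_text_poke_batch_flush() */
	if (nr_queued == MAX_ENTRIES || !addr_ordered(addr))
		flush();
	queued[nr_queued++] = addr;
}

int main(void)
{
	queue_add(0x1000);
	queue_add(0x2000);	/* in order: stays queued */
	queue_add(0x1800);	/* out of order: forces a flush first */
	printf("%d entries still pending\n", nr_queued);
	return 0;
}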
void smp_text_poke_batch_finish(void)
{
- if (tp_vec_nr) {
- smp_text_poke_batch_process(tp_vec, tp_vec_nr);
- tp_vec_nr = 0;
+ if (text_poke_array.nr_entries) {
+ smp_text_poke_batch_process(text_poke_array.vec, text_poke_array.nr_entries);
+ text_poke_array.nr_entries = 0;
}
}
static void smp_text_poke_batch_flush(void *addr)
{
lockdep_assert_held(&text_mutex);
- if (tp_vec_nr == TP_VEC_MAX || !text_poke_addr_ordered(addr)) {
- smp_text_poke_batch_process(tp_vec, tp_vec_nr);
- tp_vec_nr = 0;
+ if (text_poke_array.nr_entries == TP_ARRAY_NR_ENTRIES_MAX || !text_poke_addr_ordered(addr)) {
+ smp_text_poke_batch_process(text_poke_array.vec, text_poke_array.nr_entries);
+ text_poke_array.nr_entries = 0;
}
}
void smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct smp_text_poke_loc *tp;

	smp_text_poke_batch_flush(addr);
- tp = &tp_vec[tp_vec_nr++];
+ tp = &text_poke_array.vec[text_poke_array.nr_entries++];
text_poke_int3_loc_init(tp, addr, opcode, len, emulate);
}
void smp_text_poke_single(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct smp_text_poke_loc *tp;
/* Batch-patching should not be mixed with single-patching: */
- WARN_ON_ONCE(tp_vec_nr != 0);
+ WARN_ON_ONCE(text_poke_array.nr_entries != 0);
- tp = &tp_vec[tp_vec_nr++];
+ tp = &text_poke_array.vec[text_poke_array.nr_entries++];
text_poke_int3_loc_init(tp, addr, opcode, len, emulate);
smp_text_poke_batch_finish();
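For context, a hedged sketch of how a caller might drive the batching entry points shown above while holding text_mutex; this assumes the declarations are reachable via <asm/text-patching.h> and <linux/memory.h>, and the sites and instruction bytes are placeholders:

/* Illustrative caller of the batch API; signatures as used above. */
#include <linux/memory.h>		/* text_mutex */
#include <linux/mutex.h>
#include <asm/text-patching.h>		/* assumed home of the smp_text_poke_* declarations */

static void example_patch_two_sites(void *site1, void *site2,
				    const void *insn, size_t len)
{
	mutex_lock(&text_mutex);

	/* Queue both sites; a full or out-of-order batch is flushed early. */
	smp_text_poke_batch_add(site1, insn, len, NULL);
	smp_text_poke_batch_add(site2, insn, len, NULL);

	/* Process whatever is still queued in text_poke_array. */
	smp_text_poke_batch_finish();

	mutex_unlock(&text_mutex);
}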