return instruction_pointer(regs);
}
-static void free_ret_instance(struct return_instance *ri, bool cleanup_hprobe)
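+/*
+ * Return a no-longer-used return_instance into utask's LIFO free list
+ * so future uretprobes can reuse it without a fresh allocation; cons_cnt
+ * is reset, but the extra_consumers buffer (if any) is kept around.
+ */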
+static void ri_pool_push(struct uprobe_task *utask, struct return_instance *ri)
{
+ ri->cons_cnt = 0;
+ ri->next = utask->ri_pool;
+ utask->ri_pool = ri;
+}
+
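+/* Take a return_instance off the free list; NULL means the pool is empty. */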
+static struct return_instance *ri_pool_pop(struct uprobe_task *utask)
+{
+ struct return_instance *ri = utask->ri_pool;
+
+ if (likely(ri))
+ utask->ri_pool = ri->next;
+
+ return ri;
+}
+
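+/* Really free ri: the consumers array immediately, ri itself after an RCU GP. */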
+static void ri_free(struct return_instance *ri)
+{
+ kfree(ri->extra_consumers);
+ kfree_rcu(ri, rcu);
+}
+
+static void free_ret_instance(struct uprobe_task *utask,
+ struct return_instance *ri, bool cleanup_hprobe)
+{
+ unsigned seq;
+
if (cleanup_hprobe) {
enum hprobe_state hstate;

(void)hprobe_consume(&ri->hprobe, &hstate);
hprobe_finalize(&ri->hprobe, hstate);
}
- kfree(ri->extra_consumers);
- kfree_rcu(ri, rcu);
+ /*
+ * At this point return_instance is unlinked from utask's
+ * return_instances list and this has become visible to ri_timer().
+ * If seqcount now indicates that ri_timer's return instance
+ * processing loop isn't active, we can return ri into the pool of
+ * to-be-reused return instances for future uretprobes. If ri_timer()
+ * happens to be running right now, though, we fall back to safety and
+ * just perform RCU-delayed freeing of ri.
+ */
+ if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) {
+ /* immediate reuse of ri without RCU GP is OK */
+ ri_pool_push(utask, ri);
+ } else {
+ /* we might be racing with ri_timer(), so play it safe */
+ ri_free(ri);
+ }
}
/*
ri = utask->return_instances;
while (ri) {
ri_next = ri->next;
- free_ret_instance(ri, true /* cleanup_hprobe */);
+ free_ret_instance(utask, ri, true /* cleanup_hprobe */);
+ ri = ri_next;
+ }
+
+ /* free_ret_instance() above might add to ri_pool, so this loop should come last */
+ ri = utask->ri_pool;
+ while (ri) {
+ ri_next = ri->next;
+ ri_free(ri);
ri = ri_next;
}
/* RCU protects return_instance from freeing. */
guard(rcu)();
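+ /*
+ * Pairs with raw_seqcount_try_begin() in free_ret_instance(): while
+ * this loop is running, unlinked return instances must take the
+ * RCU-delayed freeing path instead of being recycled into ri_pool,
+ * as hprobe_expire() may still be accessing them.
+ */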
+ write_seqcount_begin(&utask->ri_seqcount);
+
for_each_ret_instance_rcu(ri, utask->return_instances)
hprobe_expire(&ri->hprobe, false);
+
+ write_seqcount_end(&utask->ri_seqcount);
}
static struct uprobe_task *alloc_utask(void)
return NULL;
timer_setup(&utask->ri_timer, ri_timer, 0);
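+ /* Arbitrates between ri_timer() and recycling instances into ri_pool. */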
+ seqcount_init(&utask->ri_seqcount);
return utask;
}
return current->utask;
}
-static struct return_instance *alloc_return_instance(void)
+static struct return_instance *alloc_return_instance(struct uprobe_task *utask)
{
struct return_instance *ri;
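+ /* Try to reuse a pooled instance before falling back to the allocator. */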
+ ri = ri_pool_pop(utask);
+ if (ri)
+ return ri;
+
ri = kzalloc(sizeof(*ri), GFP_KERNEL);
if (!ri)
return ZERO_SIZE_PTR;
rcu_assign_pointer(utask->return_instances, ri_next);
utask->depth--;
- free_ret_instance(ri, true /* cleanup_hprobe */);
+ free_ret_instance(utask, ri, true /* cleanup_hprobe */);
ri = ri_next;
}
}
return;
free:
- kfree(ri);
+ ri_free(ri);
}
/* Prepare to single-step probed instruction out of line. */
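+ /*
+ * An instance recycled through ri_pool keeps its extra_consumers
+ * buffer, so krealloc() below can often reuse it rather than allocate.
+ */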
if (unlikely(ri->cons_cnt > 0)) {
ric = krealloc(ri->extra_consumers, sizeof(*ric) * ri->cons_cnt, GFP_KERNEL);
if (!ric) {
- kfree(ri->extra_consumers);
- kfree_rcu(ri, rcu);
+ ri_free(ri);
return ZERO_SIZE_PTR;
}
ri->extra_consumers = ric;
struct uprobe_consumer *uc;
bool has_consumers = false, remove = true;
struct return_instance *ri = NULL;
+ struct uprobe_task *utask = current->utask;
- current->utask->auprobe = &uprobe->arch;
+ utask->auprobe = &uprobe->arch;
list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
bool session = uc->handler && uc->ret_handler;
continue;
if (!ri)
- ri = alloc_return_instance();
+ ri = alloc_return_instance(utask);
if (session)
ri = push_consumer(ri, uc->id, cookie);
}
- current->utask->auprobe = NULL;
+ utask->auprobe = NULL;
if (!ZERO_OR_NULL_PTR(ri))
prepare_uretprobe(uprobe, regs, ri);
hprobe_finalize(&ri->hprobe, hstate);
/* We already took care of hprobe, no need to waste more time on that. */
- free_ret_instance(ri, false /* !cleanup_hprobe */);
+ free_ret_instance(utask, ri, false /* !cleanup_hprobe */);
ri = ri_next;
} while (ri != next_chain);
} while (!valid);