fasttrap_id_t *id;
pr_info("fasttrap_pid_probe(PID %d, PC %lx)\n", tp->ftt_pid, tp->ftt_pc);
+ if (atomic64_read(&tp->ftt_proc->ftpc_acount) == 0) {
+pr_info(" Ignored (no longer active)\n");
+ return;
+ }
+
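+ /* Walk the probe IDs attached to this tracepoint. */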
for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
fasttrap_probe_t *ftp = id->fti_probe;
pid = probe->ftp_pid;
pc = probe->ftp_tps[index].fit_tp->ftt_pc;
id = &probe->ftp_tps[index].fit_id;
+pr_info("fasttrap_tracepoint_disable(PID %d, PC %ld)\n", pid, pc);
ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
mutex_unlock(&bucket->ftb_mtx);
+pr_info("fasttrap_tracepoint_disable: Disabling tracepoint for PID %d, PC %ld\n", pid, pc);
dtrace_tracepoint_disable(pid, pc, &tp->ftt_mtp);
ASSERT(id == probe->ftp_id);
+pr_info("fasttrap_pid_disable(%s%d (%p))...\n", prov->ftp_name, prov->ftp_pid, prov);
mutex_lock(&prov->ftp_mtx);
+pr_info(" Locked ftp_mtx...\n");
/*
 * Disable all the associated tracepoints (for fully enabled probes).
 */
if ((prov->ftp_retired || prov->ftp_rcount == 0) && !prov->ftp_marked)
whack = prov->ftp_marked = 1;
+ mutex_unlock(&prov->ftp_mtx);
+pr_info(" Unlocked ftp_mtx...\n");
+
if (whack)
fasttrap_pid_cleanup();
in = 1;
mutex_lock(&fasttrap_cleanup_mtx);
+ if (!fasttrap_cleanup_work && fasttrap_cleanup_state == CLEANUP_NONE) {
+pr_info("fasttrap_pid_cleanup_cb: [CPU%02d] fasttrap_cleanup_work = %d, fasttrap_cleanup_state = %d -> nothing to do\n", smp_processor_id(), fasttrap_cleanup_work, fasttrap_cleanup_state);
+ mutex_unlock(&fasttrap_cleanup_mtx);
+ return;
+}
+pr_info("fasttrap_pid_cleanup_cb: [CPU%02d] fasttrap_cleanup_work = %d\n", smp_processor_id(), fasttrap_cleanup_work);
while (fasttrap_cleanup_work) {
+pr_info("fasttrap_pid_cleanup_cb: [CPU%02d] -> fasttrap_cleanup_work = %d\n", smp_processor_id(), fasttrap_cleanup_work);
fasttrap_cleanup_work = 0;
mutex_unlock(&fasttrap_cleanup_mtx);
fpp = (fasttrap_provider_t **)&bucket->ftb_data;
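+ /*
+  * Walk this bucket's provider chain and try to reap every provider
+  * that has been marked for removal.
+  */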
while ((fp = *fpp) != NULL) {
+pr_info(" Checking provider %s%d (%p)", fp->ftp_name, fp->ftp_pid, fp);
if (!fp->ftp_marked) {
+pr_info(" Not marked, so ignoring it...\n");
fpp = &fp->ftp_next;
continue;
}
+pr_info(" Marked...\n");
mutex_lock(&fp->ftp_mtx);
+pr_info(" Locked ftp_mtx...\n");
/*
 * If this provider has consumers actively
 * creating probes (ftp_ccount) or is a USDT
 * provider (ftp_mcount), we can't unregister
 * or even condense.
 */
+pr_info(" ccount %lld, mcount %lld, rcount %lld, retired %d\n", fp->ftp_ccount, fp->ftp_mcount, fp->ftp_rcount, fp->ftp_retired);
if (fp->ftp_ccount != 0 ||
fp->ftp_mcount != 0) {
+pr_info(" %s, so ignoring it...\n", fp->ftp_ccount != 0 ? "Consumers found" : fp->ftp_mcount ? "USDT consumers found" : "BUG");
mutex_unlock(&fp->ftp_mtx);
fp->ftp_marked = 0;
continue;
}

if (!fp->ftp_retired || fp->ftp_rcount != 0)
fp->ftp_marked = 0;
mutex_unlock(&fp->ftp_mtx);
+pr_info(" Unlocked ftp_mtx...\n");
/*
 * If we successfully unregister this provider we can remove it
 * from the hash chain and free it.  If the attempt fails and we
 * have consumed more than half of our permitted probes, call
 * dtrace_condense() to try to clean out the unenabled probes.
 */
provid = fp->ftp_provid;
- if (dtrace_unregister(provid) != 0) {
+pr_info(" Calling dtrace_unregister() for %s%d (%p)\n", fp->ftp_name, fp->ftp_pid, fp);
+{
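+/* rc captures dtrace_unregister()'s return value purely for the debug output below. */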
+int rc;
+ if ((rc = dtrace_unregister(provid)) != 0) {
+pr_info(" -> returns %d for %s%d\n", rc, fp->ftp_name, fp->ftp_pid);
if (atomic_read(&fasttrap_total) >
- fasttrap_max / 2)
+ fasttrap_max / 2) {
+pr_info(" Calling dtrace_condense() for %s%d\n", fp->ftp_name, fp->ftp_pid);
dtrace_condense(provid);
+}
later += fp->ftp_marked;
+pr_info(" Increasing later to %d\n", later);
fpp = &fp->ftp_next;
} else {
*fpp = fp->ftp_next;
+pr_info(" Calling fasttrap_provider_free() for %s%d (%p)\n", fp->ftp_name, fp->ftp_pid, fp);
fasttrap_provider_free(fp);
}
+}
}
mutex_unlock(&bucket->ftb_mtx);
mutex_lock(&fasttrap_cleanup_mtx);
}
+pr_info("fasttrap_pid_cleanup_cb: [CPU%02d] fasttrap_cleanup_state = %d\n", smp_processor_id(), fasttrap_cleanup_state);
ASSERT(fasttrap_cleanup_state != CLEANUP_NONE);
/*
 * If we could not unregister a retired provider, retry in a
 * second.  If the periodic cleanup has been disabled because we
 * are detaching, set fasttrap_cleanup_work instead so that we
 * get a chance to do that work if and when the timeout is reenabled
 * (if detach fails).
 */
- if (later > 0 && fasttrap_cleanup_state != CLEANUP_DEFERRED) {
- struct delayed_work *dw = container_of(work,
- struct delayed_work,
- work);
-
- fasttrap_cleanup_state = CLEANUP_SCHEDULED;
- schedule_delayed_work(dw, HZ);
- } else if (later > 0) {
- fasttrap_cleanup_work = 1;
+ if (later > 0) {
+ if (fasttrap_cleanup_state == CLEANUP_DEFERRED)
+ fasttrap_cleanup_work = 1;
+ else {
+ struct delayed_work *dw = container_of(
+ work,
+ struct delayed_work,
+ work);
+
+ fasttrap_cleanup_state = CLEANUP_SCHEDULED;
+pr_info("fasttrap_pid_cleanup_cb: [CPU%02d] fasttrap_cleanup_state <- %d (%s:%d\n", smp_processor_id(), fasttrap_cleanup_state, __FILE__, __LINE__);
+ schedule_delayed_work(dw, HZ);
+ }
} else
+{
fasttrap_cleanup_state = CLEANUP_NONE;
+pr_info("fasttrap_pid_cleanup_cb: [CPU%02d] fasttrap_cleanup_state <- %d (%s:%d\n", smp_processor_id(), fasttrap_cleanup_state, __FILE__, __LINE__);
+}
mutex_unlock(&fasttrap_cleanup_mtx);
in = 0;
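+ /*
+  * fasttrap_pid_cleanup(): flag the pending work and schedule the
+  * cleanup callback to run shortly.
+  */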
fasttrap_cleanup_work = 1;
fasttrap_cleanup_state = CLEANUP_SCHEDULED;
pr_info("FASTTRAP: -> Scheduling delayed cleanup...\n");
+pr_info("fasttrap_pid_cleanup: [CPU%02d] fasttrap_cleanup_state <- %d (%s:%d\n", smp_processor_id(), fasttrap_cleanup_state, __FILE__, __LINE__);
schedule_delayed_work(&fasttrap_cleanup, 3);
mutex_unlock(&fasttrap_cleanup_mtx);
}
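+ /*
+  * fasttrap_dev_exit(): switch to deferred cleanup so the callback
+  * stops rescheduling itself while the device is shutting down.
+  */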
tmp = fasttrap_cleanup_state;
fasttrap_cleanup_state = CLEANUP_DEFERRED;
+pr_info("fasttrap_dev_exit: [CPU%02d] fasttrap_cleanup_state <- %d (%s:%d\n", smp_processor_id(), fasttrap_cleanup_state, __FILE__, __LINE__);
if (tmp != CLEANUP_NONE) {
mutex_unlock(&fasttrap_cleanup_mtx);