typedef struct dtrace_mops {
void (*dtms_create_probe)(void *, void *, dtrace_helper_probedesc_t *);
- void (*dtms_provide_pid)(void *, dtrace_helper_provdesc_t *, pid_t);
+ void *(*dtms_provide_pid)(void *, dtrace_helper_provdesc_t *, pid_t);
void (*dtms_remove_pid)(void *, dtrace_helper_provdesc_t *, pid_t);
} dtrace_mops_t;
typedef uintptr_t dtrace_provider_id_t;
typedef uintptr_t dtrace_meta_provider_id_t;
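+/*
+ * A meta provider (e.g. fasttrap, which implements the pid and USDT
+ * probes) registers a dtrace_mops_t ops vector with the core.  dtm_count
+ * tracks how many per-PID providers have been created through it;
+ * dtrace_meta_unregister() refuses (-EBUSY) to tear the meta provider
+ * down while that count is non-zero.
+ */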
+typedef struct dtrace_meta {
+ dtrace_mops_t dtm_mops;
+ char *dtm_name;
+ void *dtm_arg;
+ uint64_t dtm_count;
+} dtrace_meta_t;
+
extern dtrace_provider_t *dtrace_provider;
+extern dtrace_meta_t *dtrace_meta_pid;
+extern dtrace_helpers_t *dtrace_deferred_pid;
extern int dtrace_register(const char *, const dtrace_pattr_t *, uint32_t,
- cred_t *, const dtrace_pops_t *, void *,
+ const cred_t *, const dtrace_pops_t *, void *,
dtrace_provider_id_t *);
extern int dtrace_unregister(dtrace_provider_id_t);
extern void dtrace_invalidate(dtrace_provider_id_t);
extern int dtrace_difo_validate(dtrace_difo_t *, dtrace_vstate_t *, uint_t,
const cred_t *);
+extern int dtrace_difo_validate_helper(dtrace_difo_t *);
extern int dtrace_difo_cacheable(dtrace_difo_t *);
extern void dtrace_difo_hold(dtrace_difo_t *);
extern void dtrace_difo_init(dtrace_difo_t *, dtrace_vstate_t *);
/*
* DTrace Helper Functions
*/
+extern void dtrace_helpers_destroy(struct task_struct *);
extern uint64_t dtrace_helper(int, dtrace_mstate_t *, dtrace_state_t *,
uint64_t, uint64_t);
uint32_t dofxr_argn;
} dof_xlref_t;
+extern void dtrace_dof_error(dof_hdr_t *, const char *);
extern dof_hdr_t *dtrace_dof_create(dtrace_state_t *);
extern dof_hdr_t *dtrace_dof_copyin(void __user *, int *);
extern dof_hdr_t *dtrace_dof_property(const char *);
extern int dtrace_dof_slurp(dof_hdr_t *, dtrace_vstate_t *, const cred_t *,
dtrace_enabling_t **, uint64_t, int);
extern int dtrace_dof_options(dof_hdr_t *, dtrace_state_t *);
+extern void dtrace_helper_provide(dof_helper_t *, pid_t);
+extern int dtrace_helper_slurp(dof_hdr_t *, dof_helper_t *);
+extern int dtrace_helper_destroygen(int);
/*
* DTrace Anonymous Enabling Functions
module_init(name##_init); \
module_exit(name##_exit);
+#define DT_META_PROVIDER_MODULE(name) \
+ dtrace_meta_provider_id_t name##_id; \
+ \
+ static int __init name##_init(void) \
+ { \
+ int ret = 0; \
+ \
+ ret = name##_dev_init(); \
+ if (ret) \
+ goto failed; \
+ \
+ ret = dtrace_meta_register(__stringify(name), &name##_mops, \
+ NULL, &name##_id); \
+ if (ret) \
+ goto failed_meta; \
+ \
+ return 0; \
+ \
+ failed_meta: \
+ name##_dev_exit(); \
+ failed: \
+ return ret; \
+ } \
+ \
+ static void __exit name##_exit(void) \
+ { \
+ dtrace_meta_unregister(name##_id); \
+ name##_dev_exit(); \
+ } \
+ \
+ module_init(name##_init); \
+ module_exit(name##_exit);
+
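+/*
+ * Illustrative use only (not introduced by this patch): a meta provider
+ * module named "fasttrap" would supply fasttrap_dev_init(),
+ * fasttrap_dev_exit() and a fasttrap_mops ops vector, and instantiate
+ * its module hooks with:
+ *
+ *	DT_META_PROVIDER_MODULE(fasttrap)
+ *
+ * which expands to fasttrap_init()/fasttrap_exit() functions that
+ * register and unregister the meta provider with the DTrace core.
+ */
+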
typedef struct dtrace_mprovider {
char *dtmp_name;
char *dtmp_pref;
return 0;
}
+static int dtrace_open(struct inode *inode, struct file *file)
+{
+ dtrace_state_t *state;
+ uint32_t priv;
+ uid_t uid;
+
+ dtrace_cred2priv(file->f_cred, &priv, &uid);
+ if (priv == DTRACE_PRIV_NONE)
+ return -EACCES;
+
+ mutex_lock(&dtrace_provider_lock);
+ dtrace_probe_provide(NULL, NULL);
+ mutex_unlock(&dtrace_provider_lock);
+
+ mutex_lock(&cpu_lock);
+ mutex_lock(&dtrace_lock);
+ dtrace_opens++;
+ dtrace_membar_producer();
+
+#ifdef FIXME
+ /*
+ * Is this relevant for Linux? Is there an equivalent?
+ */
+ if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
+ dtrace_opens--;
+ mutex_unlock(&cpu_lock);
+ mutex_unlock(&dtrace_lock);
+ return -EBUSY;
+ }
+#endif
+
+ state = dtrace_state_create(file);
+ mutex_unlock(&cpu_lock);
+
+ if (state == NULL) {
+ dtrace_opens--;
+#ifdef FIXME
+ if (dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
+ (void)kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
+#endif
+
+ mutex_unlock(&dtrace_lock);
+
+ return -EAGAIN;
+ }
+
+ file->private_data = state;
+
+ /*
+ * We only want to enable trap handling once, so we'll do it for the
+ * first open of the DTrace core device file.
+ * FIXME: If anonymous tracing is enabled, that would have enabled trap
+ * handling already, so we should not do it here again.
+ */
+ if (dtrace_opens == 1)
+ dtrace_enable();
+
+ mutex_unlock(&dtrace_lock);
+
+ return 0;
+}
+
static long dtrace_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
return -ENOTTY;
}
-static int dtrace_open(struct inode *inode, struct file *file)
-{
- dtrace_state_t *state;
- uint32_t priv;
- uid_t uid;
-
- dtrace_cred2priv(file->f_cred, &priv, &uid);
- if (priv == DTRACE_PRIV_NONE)
- return -EACCES;
-
- mutex_lock(&dtrace_provider_lock);
- dtrace_probe_provide(NULL, NULL);
- mutex_unlock(&dtrace_provider_lock);
-
- mutex_lock(&cpu_lock);
- mutex_lock(&dtrace_lock);
- dtrace_opens++;
- dtrace_membar_producer();
-
-#ifdef FIXME
- /*
- * Is this relevant for Linux? Is there an equivalent?
- */
- if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
- dtrace_opens--;
- mutex_unlock(&cpu_lock);
- mutex_unlock(&dtrace_lock);
- return -EBUSY;
- }
-#endif
-
- state = dtrace_state_create(file);
- mutex_unlock(&cpu_lock);
-
- if (state == NULL) {
-#ifdef FIXME
- if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
- (void)kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
-#endif
-
- mutex_unlock(&dtrace_lock);
-
- return -EAGAIN;
- }
-
- file->private_data = state;
-
- /*
- * We only want to enable trap handling once, so we'll do it for the
- * first open of the DTrace core device file.
- * FIXME: If anonymous tracing is enabled, that would have enabled trap
- * handling already, so we should not do it here again.
- */
- if (dtrace_opens == 1)
- dtrace_enable();
-
- mutex_unlock(&dtrace_lock);
-
- return 0;
-}
-
static int dtrace_close(struct inode *inode, struct file *file)
{
dtrace_state_t *state = file->private_data;
return 0;
}
+static int dtrace_helper_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static long dtrace_helper_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int rval;
+ dof_helper_t help, *dhp = NULL;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case DTRACEHIOC_ADDDOF:
+ if (copy_from_user(&help, argp, sizeof(help)) != 0) {
+ dtrace_dof_error(NULL, "failed to copy DOF helper");
+ return -EFAULT;
+ }
+
+ dhp = &help;
+ argp = (void __user *)help.dofhp_dof;
+
+ /* fallthrough */
+
+ case DTRACEHIOC_ADD: {
+ dof_hdr_t *dof = dtrace_dof_copyin(argp, &rval);
+
+ if (dof == NULL)
+ return rval;
+
+ dt_dbg_ioctl("Helper IOCTL: %s\n",
+ cmd == DTRACEHIOC_ADD ? "AddProbe" : "AddDOF");
+
+ mutex_lock(&dtrace_lock);
+
+ /*
+ * The dtrace_helper_slurp() routine takes responsibility for
+ * the dof -- it may free it now, or it may save it and free it
+ * later.
+ */
+ if ((rval = dtrace_helper_slurp(dof, dhp)) == -1)
+ rval = -EINVAL;
+
+ mutex_unlock(&dtrace_lock);
+
+ dt_dbg_ioctl("Helper IOCTL: %s returning %d\n",
+ cmd == DTRACEHIOC_ADD ? "AddProbe" : "AddDOF",
+ rval);
+
+ return rval;
+ }
+
+ case DTRACEHIOC_REMOVE:
+ dt_dbg_ioctl("Helper IOCTL: Remove gen %ld\n", (uintptr_t)argp);
+
+ mutex_lock(&dtrace_lock);
+
+ rval = dtrace_helper_destroygen((uintptr_t)argp);
+
+ mutex_unlock(&dtrace_lock);
+
+ return rval;
+ default:
+ break;
+ }
+
+ return -ENOTTY;
+}
+
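+/*
+ * Expected userspace interaction, as an illustrative sketch only (in
+ * practice the USDT runtime linked into the process performs this at
+ * startup):
+ *
+ *	dof_helper_t dh = {
+ *		.dofhp_mod  = "a.out",
+ *		.dofhp_addr = base,
+ *		.dofhp_dof  = (uint64_t)(uintptr_t)dof,
+ *	};
+ *	int fd = open("/dev/dtrace/helper", O_RDWR);
+ *	int gen = ioctl(fd, DTRACEHIOC_ADDDOF, &dh);
+ *	...
+ *	ioctl(fd, DTRACEHIOC_REMOVE, gen);
+ *
+ * The generation number returned by dtrace_helper_slurp() is what the
+ * REMOVE ioctl later uses to tear down everything a single ADDDOF call
+ * established.
+ */
+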
+static int dtrace_helper_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
static const struct file_operations dtrace_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dtrace_ioctl,
.open = dtrace_open,
.release = dtrace_close,
};
+static const struct file_operations helper_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dtrace_helper_ioctl,
+ .open = dtrace_helper_open,
+ .release = dtrace_helper_close,
+};
+
static struct miscdevice dtrace_dev = {
.minor = DT_DEV_DTRACE_MINOR,
.name = "dtrace",
.fops = &dtrace_fops,
};
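+/*
+ * The helper device is registered with nodename "dtrace/helper", so it
+ * shows up as /dev/dtrace/helper on systems using devtmpfs.  USDT
+ * processes hand their DOF to the kernel through ioctls on this node.
+ */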
+static struct miscdevice helper_dev = {
+ .minor = DT_DEV_HELPER_MINOR,
+ .name = "helper",
+ .nodename = "dtrace/helper",
+ .fops = &helper_fops,
+};
+
static void
dtrace_module_loaded(struct module *module)
{
return rc;
}
+ /*
+ * Register the device for the DTrace helper.
+ */
+ rc = misc_register(&helper_dev);
+ if (rc) {
+ pr_err("%s: Can't register misc device %d\n",
+ helper_dev.name, helper_dev.minor);
+
+ mutex_unlock(&cpu_lock);
+ mutex_unlock(&dtrace_provider_lock);
+ mutex_unlock(&dtrace_lock);
+
+ return rc;
+ }
+
ctf_forceload();
dtrace_modload = dtrace_module_loaded;
dtrace_modunload = dtrace_module_unloaded;
+ dtrace_helpers_cleanup = dtrace_helpers_destroy;
#ifdef FIXME
dtrace_cpu_init = dtrace_cpu_setup_initial;
- dtrace_helpers_cleanup = dtrace_helpers_destroy;
dtrace_helpers_fork = dtrace_helpers_duplicate;
dtrace_cpustart_init = dtrace_suspend;
dtrace_cpustart_fini = dtrace_resume;
void dtrace_dev_exit(void)
{
kmem_cache_destroy(dtrace_state_cache);
+ misc_deregister(&helper_dev);
misc_deregister(&dtrace_dev);
dtrace_probe_exit();
return err;
}
+/*
+ * Validate a DTrace DIF object for use as a helper. Helpers are much
+ * are much more constrained than normal DIFOs. Specifically, they may
+ * not:
+ *
+ * 1. Make calls to subroutines other than copyin(), copyinstr() or
+ * miscellaneous string routines.
+ * 2. Access DTrace variables other than the args[] array, and the
+ * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
+ * 3. Have thread-local variables.
+ * 4. Have dynamic variables.
+ */
+int dtrace_difo_validate_helper(dtrace_difo_t *dp)
+{
+ int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
+ int err = 0;
+ uint_t pc;
+
+ for (pc = 0; pc < dp->dtdo_len; pc++) {
+ dif_instr_t instr = dp->dtdo_buf[pc];
+ uint_t v = DIF_INSTR_VAR(instr);
+ uint_t subr = DIF_INSTR_SUBR(instr);
+ uint_t op = DIF_INSTR_OP(instr);
+
+ switch (op) {
+ case DIF_OP_OR:
+ case DIF_OP_XOR:
+ case DIF_OP_AND:
+ case DIF_OP_SLL:
+ case DIF_OP_SRL:
+ case DIF_OP_SRA:
+ case DIF_OP_SUB:
+ case DIF_OP_ADD:
+ case DIF_OP_MUL:
+ case DIF_OP_SDIV:
+ case DIF_OP_UDIV:
+ case DIF_OP_SREM:
+ case DIF_OP_UREM:
+ case DIF_OP_COPYS:
+ case DIF_OP_NOT:
+ case DIF_OP_MOV:
+ case DIF_OP_RLDSB:
+ case DIF_OP_RLDSH:
+ case DIF_OP_RLDSW:
+ case DIF_OP_RLDUB:
+ case DIF_OP_RLDUH:
+ case DIF_OP_RLDUW:
+ case DIF_OP_RLDX:
+ case DIF_OP_ULDSB:
+ case DIF_OP_ULDSH:
+ case DIF_OP_ULDSW:
+ case DIF_OP_ULDUB:
+ case DIF_OP_ULDUH:
+ case DIF_OP_ULDUW:
+ case DIF_OP_ULDX:
+ case DIF_OP_STB:
+ case DIF_OP_STH:
+ case DIF_OP_STW:
+ case DIF_OP_STX:
+ case DIF_OP_ALLOCS:
+ case DIF_OP_CMP:
+ case DIF_OP_SCMP:
+ case DIF_OP_TST:
+ case DIF_OP_BA:
+ case DIF_OP_BE:
+ case DIF_OP_BNE:
+ case DIF_OP_BG:
+ case DIF_OP_BGU:
+ case DIF_OP_BGE:
+ case DIF_OP_BGEU:
+ case DIF_OP_BL:
+ case DIF_OP_BLU:
+ case DIF_OP_BLE:
+ case DIF_OP_BLEU:
+ case DIF_OP_RET:
+ case DIF_OP_NOP:
+ case DIF_OP_POPTS:
+ case DIF_OP_FLUSHTS:
+ case DIF_OP_SETX:
+ case DIF_OP_SETS:
+ case DIF_OP_LDGA:
+ case DIF_OP_LDLS:
+ case DIF_OP_STGS:
+ case DIF_OP_STLS:
+ case DIF_OP_PUSHTR:
+ case DIF_OP_PUSHTV:
+ break;
+
+ case DIF_OP_LDGS:
+ if (v >= DIF_VAR_OTHER_UBASE)
+ break;
+
+ if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
+ break;
+
+ if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
+ v == DIF_VAR_PPID || v == DIF_VAR_TID ||
+ v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
+ v == DIF_VAR_UID || v == DIF_VAR_GID)
+ break;
+
+ err += efunc(pc, "illegal variable %u\n", v);
+ break;
+
+ case DIF_OP_LDTA:
+ case DIF_OP_LDGAA:
+ case DIF_OP_LDTAA:
+ err += efunc(pc, "illegal dynamic variable load\n");
+ break;
+
+ case DIF_OP_STTS:
+ case DIF_OP_STGAA:
+ case DIF_OP_STTAA:
+ err += efunc(pc, "illegal dynamic variable store\n");
+ break;
+
+ case DIF_OP_CALL:
+ if (subr == DIF_SUBR_ALLOCA ||
+ subr == DIF_SUBR_BCOPY ||
+ subr == DIF_SUBR_COPYIN ||
+ subr == DIF_SUBR_COPYINTO ||
+ subr == DIF_SUBR_COPYINSTR ||
+ subr == DIF_SUBR_INDEX ||
+ subr == DIF_SUBR_INET_NTOA ||
+ subr == DIF_SUBR_INET_NTOA6 ||
+ subr == DIF_SUBR_INET_NTOP ||
+ subr == DIF_SUBR_LLTOSTR ||
+ subr == DIF_SUBR_RINDEX ||
+ subr == DIF_SUBR_STRCHR ||
+ subr == DIF_SUBR_STRJOIN ||
+ subr == DIF_SUBR_STRRCHR ||
+ subr == DIF_SUBR_STRSTR ||
+ subr == DIF_SUBR_HTONS ||
+ subr == DIF_SUBR_HTONL ||
+ subr == DIF_SUBR_HTONLL ||
+ subr == DIF_SUBR_NTOHS ||
+ subr == DIF_SUBR_NTOHL ||
+ subr == DIF_SUBR_NTOHLL)
+ break;
+
+ err += efunc(pc, "invalid subr %u\n", subr);
+ break;
+
+ default:
+ err += efunc(pc, "invalid opcode %u\n",
+ DIF_INSTR_OP(instr));
+ }
+ }
+
+ return err;
+}
+
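+/*
+ * As an illustration (not part of this patch), a helper that satisfies
+ * the constraints above is the ustack translator a USDT application may
+ * install, e.g.:
+ *
+ *	dtrace:helper:ustack:
+ *	{
+ *		this->s = copyinstr(arg0);
+ *	}
+ *
+ * whereas helpers using thread-local (self->) variables, aggregations,
+ * or subroutines such as speculation() are rejected by the checks above.
+ */
+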
/*
* Returns 1 if the expression in the DIF object can be cached on a per-thread
* basis; 0 if not.
*/
static void dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
{
- uint64_t sval;
+ uint64_t sval = 0;
dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* + thread + id */
const dif_instr_t *text = dp->dtdo_buf;
uint_t pc, srd = 0;
#include "dtrace.h"
-size_t dtrace_difo_maxsize = 256 * 1024;
-dtrace_optval_t dtrace_dof_maxsize = 256 * 1024;
-size_t dtrace_actions_max = 16 * 1024;
+size_t dtrace_difo_maxsize = 256 * 1024;
+dtrace_optval_t dtrace_dof_maxsize = 256 * 1024;
+size_t dtrace_actions_max = 16 * 1024;
+dtrace_optval_t dtrace_helper_actions_max = 32;
+dtrace_optval_t dtrace_helper_providers_max = 32;
-static void dtrace_dof_error(dof_hdr_t *dof, const char *str)
+static int dtrace_helpers;
+
+static uint32_t dtrace_helptrace_next = 0;
+static uint32_t dtrace_helptrace_nlocals;
+static char *dtrace_helptrace_buffer;
+static int dtrace_helptrace_bufsize = 512 * 1024;
+
+#ifdef CONFIG_DT_DEBUG
+static int dtrace_helptrace_enabled = 1;
+#else
+static int dtrace_helptrace_enabled = 0;
+#endif
+
+void dtrace_dof_error(dof_hdr_t *dof, const char *str)
{
if (dtrace_err_verbose)
pr_warning("failed to process DOF: %s", str);
+ else
+ dt_dbg_dof("Failed to process DOF: %s\n", str);
#ifdef DTRACE_ERRDEBUG
dtrace_errdebug(str);
strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
min((size_t)DTRACE_NAMELEN - 1, size - probe->dofp_name));
+ dt_dbg_dof(" ECB Probe %s:%s:%s:%s\n",
+ desc->dtpd_provider, desc->dtpd_mod, desc->dtpd_func,
+ desc->dtpd_name);
+
return desc;
}
ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(dof->dofh_loadsz >= sizeof(dof_hdr_t));
+ dt_dbg_dof(" DOF 0x%p Slurping...\n", dof);
+
+ dt_dbg_dof(" DOF 0x%p Validating...\n", dof);
+
/*
* Check the DOF header identification bytes. In addition to checking
* valid settings, we also verify that unused bits/bytes are zeroed so
* the headers don't have stray offsets. If the 'noprobes' flag is
* set, do not permit sections relating to providers, probes, or args.
*/
+ dt_dbg_dof(" DOF 0x%p Checking section offsets...\n", dof);
+
for (i = 0; i < dof->dofh_secnum; i++) {
dof_sec_t *sec =
(dof_sec_t *)(daddr +
* relocations that are present. We do this after the first pass to
* be sure that all sections have had their headers validated.
*/
+ dt_dbg_dof(" DOF 0x%p Performing relocations...\n", dof);
+
for (i = 0; i < dof->dofh_secnum; i++) {
dof_sec_t *sec =
(dof_sec_t *)(daddr +
}
}
+ dt_dbg_dof(" DOF 0x%p Processing enablings...\n", dof);
+
if ((enab = *enabp) == NULL)
enab = *enabp = dtrace_enabling_create(vstate);
return 0;
}
+
+static dtrace_helpers_t *dtrace_helpers_create(struct task_struct *curr)
+{
+ dtrace_helpers_t *dth;
+
+ ASSERT(MUTEX_HELD(&dtrace_lock));
+ ASSERT(curr->dtrace_helpers == NULL);
+
+ dth = vzalloc(sizeof(dtrace_helpers_t));
+ dth->dthps_actions = vzalloc(sizeof(dtrace_helper_action_t *) *
+ DTRACE_NHELPER_ACTIONS);
+
+ curr->dtrace_helpers = dth;
+ dtrace_helpers++;
+
+ dt_dbg_dof(" Helpers allocated for task 0x%p (%d system-wide)\n",
+ curr, dtrace_helpers);
+
+ return dth;
+}
+
+static int dtrace_helper_validate(dtrace_helper_action_t *helper)
+{
+ int err = 0, i;
+ dtrace_difo_t *dp;
+
+ if ((dp = helper->dtha_predicate) != NULL)
+ err += dtrace_difo_validate_helper(dp);
+
+ for (i = 0; i < helper->dtha_nactions; i++)
+ err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
+
+ return (err == 0);
+}
+
+static int dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
+{
+ uintptr_t daddr = (uintptr_t)dof;
+ dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
+ dof_provider_t *prov;
+ dof_probe_t *prb;
+ uint8_t *arg;
+ char *strtab, *typestr;
+ dof_stridx_t typeidx;
+ size_t typesz;
+ uint_t nprobes, j, k;
+
+ ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
+
+ if (sec->dofs_offset & (sizeof(uint_t) - 1)) {
+ dtrace_dof_error(dof, "misaligned section offset");
+ return -1;
+ }
+
+ /*
+ * The section needs to be large enough to contain the DOF provider
+ * structure appropriate for the given version.
+ */
+ if (sec->dofs_size <
+ ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1)
+ ? offsetof(dof_provider_t, dofpv_prenoffs)
+ : sizeof(dof_provider_t))) {
+ dtrace_dof_error(dof, "provider section too small");
+ return -1;
+ }
+
+ prov = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
+ str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, prov->dofpv_strtab);
+ prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, prov->dofpv_probes);
+ arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, prov->dofpv_prargs);
+ off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, prov->dofpv_proffs);
+
+ if (str_sec == NULL || prb_sec == NULL ||
+ arg_sec == NULL || off_sec == NULL)
+ return -1;
+
+ enoff_sec = NULL;
+
+ if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
+ prov->dofpv_prenoffs != DOF_SECT_NONE &&
+ (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
+ prov->dofpv_prenoffs)) == NULL)
+ return -1;
+
+ strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
+
+ if (prov->dofpv_name >= str_sec->dofs_size ||
+ strlen(strtab + prov->dofpv_name) >= DTRACE_PROVNAMELEN) {
+ dtrace_dof_error(dof, "invalid provider name");
+ return -1;
+ }
+
+ if (prb_sec->dofs_entsize == 0 ||
+ prb_sec->dofs_entsize > prb_sec->dofs_size) {
+ dtrace_dof_error(dof, "invalid entry size");
+ return -1;
+ }
+
+ if (prb_sec->dofs_entsize & (sizeof(uintptr_t) - 1)) {
+ dtrace_dof_error(dof, "misaligned entry size");
+ return -1;
+ }
+
+ if (off_sec->dofs_entsize != sizeof(uint32_t)) {
+ dtrace_dof_error(dof, "invalid entry size");
+ return -1;
+ }
+
+ if (off_sec->dofs_offset & (sizeof(uint32_t) - 1)) {
+ dtrace_dof_error(dof, "misaligned section offset");
+ return -1;
+ }
+
+ if (arg_sec->dofs_entsize != sizeof(uint8_t)) {
+ dtrace_dof_error(dof, "invalid entry size");
+ return -1;
+ }
+
+ arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
+ nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
+
+ dt_dbg_dof(" DOF 0x%p %s::: with %d probes\n",
+ dof, strtab + prov->dofpv_name, nprobes);
+
+ /*
+ * Take a pass through the probes to check for errors.
+ */
+ for (j = 0; j < nprobes; j++) {
+ prb = (dof_probe_t *)(uintptr_t)
+ (daddr + prb_sec->dofs_offset +
+ j * prb_sec->dofs_entsize);
+
+ if (prb->dofpr_func >= str_sec->dofs_size) {
+ dtrace_dof_error(dof, "invalid function name");
+ return -1;
+ }
+
+ if (strlen(strtab + prb->dofpr_func) >= DTRACE_FUNCNAMELEN) {
+ dtrace_dof_error(dof, "function name too long");
+ return -1;
+ }
+
+ if (prb->dofpr_name >= str_sec->dofs_size ||
+ strlen(strtab + prb->dofpr_name) >= DTRACE_NAMELEN) {
+ dtrace_dof_error(dof, "invalid probe name");
+ return -1;
+ }
+
+ /*
+ * The offset count must not wrap the index, and the offsets
+ * must also not overflow the section's data.
+ */
+ if (prb->dofpr_offidx + prb->dofpr_noffs < prb->dofpr_offidx ||
+ (prb->dofpr_offidx + prb->dofpr_noffs) *
+ off_sec->dofs_entsize > off_sec->dofs_size) {
+ dtrace_dof_error(dof, "invalid probe offset");
+ return -1;
+ }
+
+ if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
+ /*
+ * If there's no is-enabled offset section, make sure
+ * there aren't any is-enabled offsets. Otherwise
+ * perform the same checks as for probe offsets
+ * (immediately above).
+ */
+ if (enoff_sec == NULL) {
+ if (prb->dofpr_enoffidx != 0 ||
+ prb->dofpr_nenoffs != 0) {
+ dtrace_dof_error(dof,
+ "is-enabled offsets "
+ "with null section");
+ return -1;
+ }
+ } else if (prb->dofpr_enoffidx + prb->dofpr_nenoffs <
+ prb->dofpr_enoffidx ||
+ (prb->dofpr_enoffidx + prb->dofpr_nenoffs) *
+ enoff_sec->dofs_entsize >
+ enoff_sec->dofs_size) {
+ dtrace_dof_error(dof, "invalid is-enabled "
+ "offset");
+ return -1;
+ }
+
+ if (prb->dofpr_noffs + prb->dofpr_nenoffs == 0) {
+ dtrace_dof_error(dof, "zero probe and "
+ "is-enabled offsets");
+ return -1;
+ }
+ } else if (prb->dofpr_noffs == 0) {
+ dtrace_dof_error(dof, "zero probe offsets");
+ return -1;
+ }
+
+ if (prb->dofpr_argidx + prb->dofpr_xargc < prb->dofpr_argidx ||
+ (prb->dofpr_argidx + prb->dofpr_xargc) *
+ arg_sec->dofs_entsize > arg_sec->dofs_size) {
+ dtrace_dof_error(dof, "invalid args");
+ return -1;
+ }
+
+ typeidx = prb->dofpr_nargv;
+ typestr = strtab + prb->dofpr_nargv;
+ for (k = 0; k < prb->dofpr_nargc; k++) {
+ if (typeidx >= str_sec->dofs_size) {
+ dtrace_dof_error(dof, "bad native argument "
+ "type");
+ return -1;
+ }
+
+ typesz = strlen(typestr) + 1;
+ if (typesz > DTRACE_ARGTYPELEN) {
+ dtrace_dof_error(dof, "native argument type "
+ "too long");
+ return -1;
+ }
+
+ typeidx += typesz;
+ typestr += typesz;
+ }
+
+ typeidx = prb->dofpr_xargv;
+ typestr = strtab + prb->dofpr_xargv;
+ for (k = 0; k < prb->dofpr_xargc; k++) {
+ if (arg[prb->dofpr_argidx + k] > prb->dofpr_nargc) {
+ dtrace_dof_error(dof, "bad native argument "
+ "index");
+ return -1;
+ }
+
+ if (typeidx >= str_sec->dofs_size) {
+ dtrace_dof_error(dof, "bad translated "
+ "argument type");
+ return -1;
+ }
+
+ typesz = strlen(typestr) + 1;
+ if (typesz > DTRACE_ARGTYPELEN) {
+ dtrace_dof_error(dof, "translated argument "
+ "type too long");
+ return -1;
+ }
+
+ typeidx += typesz;
+ typestr += typesz;
+ }
+
+ dt_dbg_dof(" Probe %d %s:%s:%s:%s with %d offsets, "
+ "%d is-enabled offsets\n", j,
+ strtab + prov->dofpv_name, "",
+ strtab + prb->dofpr_func, strtab + prb->dofpr_name,
+ prb->dofpr_noffs, prb->dofpr_nenoffs);
+ }
+
+ return 0;
+}
+
+static void dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
+ dtrace_vstate_t *vstate)
+{
+ int i;
+
+ if (helper->dtha_predicate != NULL)
+ dtrace_difo_release(helper->dtha_predicate, vstate);
+
+ for (i = 0; i < helper->dtha_nactions; i++) {
+ ASSERT(helper->dtha_actions[i] != NULL);
+ dtrace_difo_release(helper->dtha_actions[i], vstate);
+ }
+
+ vfree(helper->dtha_actions);
+ vfree(helper);
+}
+
+static int dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
+{
+ dtrace_helpers_t *dth;
+ dtrace_helper_action_t *helper, *last;
+ dtrace_actdesc_t *act;
+ dtrace_vstate_t *vstate;
+ dtrace_predicate_t *pred;
+ int count = 0, nactions = 0, i;
+
+ if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
+ return -EINVAL;
+
+ dth = current->dtrace_helpers;
+ last = dth->dthps_actions[which];
+ vstate = &dth->dthps_vstate;
+
+ for (count = 0; last != NULL; last = last->dtha_next) {
+ count++;
+ if (last->dtha_next == NULL)
+ break;
+ }
+
+ /*
+ * If we already have dtrace_helper_actions_max helper actions for this
+ * helper action type, we'll refuse to add a new one.
+ */
+ if (count >= dtrace_helper_actions_max)
+ return -ENOSPC;
+
+ helper = vzalloc(sizeof(dtrace_helper_action_t));
+ helper->dtha_generation = dth->dthps_generation;
+
+ if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
+ ASSERT(pred->dtp_difo != NULL);
+ dtrace_difo_hold(pred->dtp_difo);
+ helper->dtha_predicate = pred->dtp_difo;
+ }
+
+ for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
+ if (act->dtad_kind != DTRACEACT_DIFEXPR)
+ goto err;
+
+ if (act->dtad_difo == NULL)
+ goto err;
+
+ nactions++;
+ }
+
+ helper->dtha_actions = vzalloc(sizeof(dtrace_difo_t *) *
+ (helper->dtha_nactions = nactions));
+
+ for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
+ dtrace_difo_hold(act->dtad_difo);
+ helper->dtha_actions[i++] = act->dtad_difo;
+ }
+
+ if (!dtrace_helper_validate(helper))
+ goto err;
+
+ if (last == NULL)
+ dth->dthps_actions[which] = helper;
+ else
+ last->dtha_next = helper;
+
+ if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
+ dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
+ dtrace_helptrace_next = 0;
+ }
+
+ return 0;
+
+err:
+ dtrace_helper_action_destroy(helper, vstate);
+ return -EINVAL;
+}
+
+static int dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
+{
+ dtrace_helpers_t *dth;
+ dtrace_helper_provider_t *hprov, **tmp_provs;
+ uint_t tmp_maxprovs, i;
+
+ ASSERT(MUTEX_HELD(&dtrace_lock));
+
+ dth = current->dtrace_helpers;
+ ASSERT(dth != NULL);
+
+ /*
+ * If we already have dtrace_helper_providers_max helper providers,
+ * we'll refuse to add a new one.
+ */
+ if (dth->dthps_nprovs >= dtrace_helper_providers_max)
+ return -ENOSPC;
+
+ /*
+ * Check to make sure this isn't a duplicate.
+ */
+ for (i = 0; i < dth->dthps_nprovs; i++) {
+ if (dofhp->dofhp_addr ==
+ dth->dthps_provs[i]->dthp_prov.dofhp_addr)
+ return -EALREADY;
+ }
+
+ hprov = vzalloc(sizeof(dtrace_helper_provider_t));
+ hprov->dthp_prov = *dofhp;
+ hprov->dthp_ref = 1;
+ hprov->dthp_generation = gen;
+
+ /*
+ * Allocate a bigger table for helper providers if it's already full.
+ */
+ if (dth->dthps_maxprovs == dth->dthps_nprovs) {
+ tmp_maxprovs = dth->dthps_maxprovs;
+ tmp_provs = dth->dthps_provs;
+
+ if (dth->dthps_maxprovs == 0)
+ dth->dthps_maxprovs = 2;
+ else
+ dth->dthps_maxprovs *= 2;
+
+ if (dth->dthps_maxprovs > dtrace_helper_providers_max)
+ dth->dthps_maxprovs = dtrace_helper_providers_max;
+
+ ASSERT(tmp_maxprovs < dth->dthps_maxprovs);
+
+ dth->dthps_provs = vzalloc(dth->dthps_maxprovs *
+ sizeof(dtrace_helper_provider_t *));
+
+ if (tmp_provs != NULL) {
+ memcpy(dth->dthps_provs, tmp_provs,
+ tmp_maxprovs *
+ sizeof(dtrace_helper_provider_t *));
+ vfree(tmp_provs);
+ }
+ }
+
+ dth->dthps_provs[dth->dthps_nprovs] = hprov;
+ dth->dthps_nprovs++;
+
+ return 0;
+}
+
+static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
+{
+ mutex_lock(&dtrace_lock);
+
+ if (--hprov->dthp_ref == 0) {
+ dof_hdr_t *dof;
+
+ mutex_unlock(&dtrace_lock);
+
+ dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
+ dtrace_dof_destroy(dof);
+ vfree(hprov);
+ } else
+ mutex_unlock(&dtrace_lock);
+}
+
+static void dtrace_dofattr2attr(dtrace_attribute_t *attr,
+ const dof_attr_t dofattr)
+{
+ attr->dtat_name = DOF_ATTR_NAME(dofattr);
+ attr->dtat_data = DOF_ATTR_DATA(dofattr);
+ attr->dtat_class = DOF_ATTR_CLASS(dofattr);
+}
+
+static void dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
+ const dof_provider_t *dofprov, char *strtab)
+{
+ hprov->dthpv_provname = strtab + dofprov->dofpv_name;
+ dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
+ dofprov->dofpv_provattr);
+ dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
+ dofprov->dofpv_modattr);
+ dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
+ dofprov->dofpv_funcattr);
+ dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
+ dofprov->dofpv_nameattr);
+ dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
+ dofprov->dofpv_argsattr);
+}
+
+static void dtrace_helper_provider_remove_one(dof_helper_t *dhp,
+ dof_sec_t *sec, pid_t pid)
+{
+ uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
+ dof_hdr_t *dof = (dof_hdr_t *)daddr;
+ dof_sec_t *str_sec;
+ dof_provider_t *prov;
+ char *strtab;
+ dtrace_helper_provdesc_t dhpv;
+ dtrace_meta_t *meta = dtrace_meta_pid;
+ dtrace_mops_t *mops = &meta->dtm_mops;
+
+ prov = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
+ str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
+ prov->dofpv_strtab *
+ dof->dofh_secsize);
+
+ strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
+
+ /*
+ * Create the provider.
+ */
+ dtrace_dofprov2hprov(&dhpv, prov, strtab);
+
+ dt_dbg_dof(" Removing provider %s for PID %d\n",
+ dhpv.dthpv_provname, pid);
+
+ mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
+
+ meta->dtm_count--;
+}
+
+static void dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
+{
+ uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
+ dof_hdr_t *dof = (dof_hdr_t *)daddr;
+ int i;
+
+ ASSERT(MUTEX_HELD(&dtrace_meta_lock));
+
+ for (i = 0; i < dof->dofh_secnum; i++) {
+ dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
+ (daddr + dof->dofh_secoff +
+ i * dof->dofh_secsize);
+
+ if (sec->dofs_type != DOF_SECT_PROVIDER)
+ continue;
+
+ dtrace_helper_provider_remove_one(dhp, sec, pid);
+ }
+}
+
+static void dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec,
+ pid_t pid)
+{
+ uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
+ dof_hdr_t *dof = (dof_hdr_t *)daddr;
+ dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec,
+ *enoff_sec;
+ dof_provider_t *prov;
+ dof_probe_t *probe;
+ uint32_t *off, *enoff;
+ uint8_t *arg;
+ char *strtab;
+ uint_t i, nprobes;
+ dtrace_helper_provdesc_t dhpv;
+ dtrace_helper_probedesc_t dhpb;
+ dtrace_meta_t *meta = dtrace_meta_pid;
+ dtrace_mops_t *mops = &meta->dtm_mops;
+ void *parg;
+
+ prov = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
+ str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
+ prov->dofpv_strtab *
+ dof->dofh_secsize);
+ prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
+ prov->dofpv_probes *
+ dof->dofh_secsize);
+ arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
+ prov->dofpv_prargs *
+ dof->dofh_secsize);
+ off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
+ prov->dofpv_proffs *
+ dof->dofh_secsize);
+
+ strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
+ off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
+ arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
+ enoff = NULL;
+
+ /*
+ * See dtrace_helper_provider_validate().
+ */
+ if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
+ prov->dofpv_prenoffs != DOF_SECT_NONE) {
+ enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
+ prov->dofpv_prenoffs *
+ dof->dofh_secsize);
+ enoff = (uint32_t *)(uintptr_t)(daddr +
+ enoff_sec->dofs_offset);
+ }
+
+ nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
+
+ /*
+ * Create the provider.
+ */
+ dtrace_dofprov2hprov(&dhpv, prov, strtab);
+
+ if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
+ return;
+
+ dt_dbg_dof(" Created provider %s for PID %d\n",
+ strtab + prov->dofpv_name, pid);
+
+ meta->dtm_count++;
+
+ /*
+ * Create the probes.
+ */
+ for (i = 0; i < nprobes; i++) {
+ probe = (dof_probe_t *)(uintptr_t)(daddr +
+ prb_sec->dofs_offset +
+ i * prb_sec->dofs_entsize);
+
+ dhpb.dthpb_mod = dhp->dofhp_mod;
+ dhpb.dthpb_func = strtab + probe->dofpr_func;
+ dhpb.dthpb_name = strtab + probe->dofpr_name;
+ dhpb.dthpb_base = probe->dofpr_addr;
+ dhpb.dthpb_offs = off + probe->dofpr_offidx;
+ dhpb.dthpb_noffs = probe->dofpr_noffs;
+
+ if (enoff != NULL) {
+ dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
+ dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
+ } else {
+ dhpb.dthpb_enoffs = NULL;
+ dhpb.dthpb_nenoffs = 0;
+ }
+
+ dhpb.dthpb_args = arg + probe->dofpr_argidx;
+ dhpb.dthpb_nargc = probe->dofpr_nargc;
+ dhpb.dthpb_xargc = probe->dofpr_xargc;
+ dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
+ dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
+
+ mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
+
+ dt_dbg_dof(" Created probe %s:%s:%s:%s\n",
+ strtab + prov->dofpv_name, "", dhpb.dthpb_func,
+ dhpb.dthpb_name);
+ }
+}
+
+void dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
+{
+ uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
+ dof_hdr_t *dof = (dof_hdr_t *)daddr;
+ int i;
+
+ ASSERT(MUTEX_HELD(&dtrace_meta_lock));
+
+ for (i = 0; i < dof->dofh_secnum; i++) {
+ dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
+ (daddr + dof->dofh_secoff +
+ i * dof->dofh_secsize);
+
+ if (sec->dofs_type != DOF_SECT_PROVIDER)
+ continue;
+
+ dtrace_helper_provide_one(dhp, sec, pid);
+ }
+
+ /*
+ * We may have just created probes, so we must now rematch against any
+ * retained enablings. Note that this call will acquire both cpu_lock
+ * and dtrace_lock; the fact that we are holding dtrace_meta_lock now
+ * is what defines the ordering with respect to these three locks.
+ */
+ dt_dbg_dof(" Re-matching against any retained enablings\n");
+ dtrace_enabling_matchall();
+}
+
+static void dtrace_helper_provider_register(struct task_struct *curr,
+ dtrace_helpers_t *dth,
+ dof_helper_t *dofhp)
+{
+ ASSERT(!MUTEX_HELD(&dtrace_lock));
+
+ mutex_lock(&dtrace_meta_lock);
+ mutex_lock(&dtrace_lock);
+
+ if (!dtrace_attached() || dtrace_meta_pid == NULL) {
+ dt_dbg_dof(" No meta provider registered -- deferred\n");
+
+ /*
+ * If the dtrace module is loaded but not attached, or if there
+ * isn't a meta provider registered to deal with these provider
+ * descriptions, we need to postpone creating the actual
+ * providers until later.
+ */
+ if (dth->dthps_next == NULL && dth->dthps_prev == NULL &&
+ dtrace_deferred_pid != dth) {
+ dth->dthps_deferred = 1;
+ dth->dthps_pid = current->pid;
+ dth->dthps_next = dtrace_deferred_pid;
+ dth->dthps_prev = NULL;
+ if (dtrace_deferred_pid != NULL)
+ dtrace_deferred_pid->dthps_prev = dth;
+ dtrace_deferred_pid = dth;
+ }
+
+ mutex_unlock(&dtrace_lock);
+ } else if (dofhp != NULL) {
+ /*
+ * If the dtrace module is loaded and we have a particular
+ * helper provider description, pass that off to the meta
+ * provider.
+ */
+ mutex_unlock(&dtrace_lock);
+
+ dtrace_helper_provide(dofhp, current->pid);
+ } else {
+ /*
+ * Otherwise, just pass all the helper provider descriptions
+ * off to the meta provider.
+ */
+ int i;
+
+ mutex_unlock(&dtrace_lock);
+
+ for (i = 0; i < dth->dthps_nprovs; i++) {
+ dtrace_helper_provide(&dth->dthps_provs[i]->dthp_prov,
+ current->pid);
+ }
+ }
+
+ mutex_unlock(&dtrace_meta_lock);
+}
+
+int dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
+{
+ dtrace_helpers_t *dth;
+ dtrace_vstate_t *vstate;
+ dtrace_enabling_t *enab = NULL;
+ int i, gen, rv;
+ int nhelpers = 0, nprovs = 0, destroy = 1;
+ uintptr_t daddr = (uintptr_t)dof;
+
+ ASSERT(MUTEX_HELD(&dtrace_lock));
+
+ if ((dth = current->dtrace_helpers) == NULL)
+ dth = dtrace_helpers_create(current);
+
+ vstate = &dth->dthps_vstate;
+
+ if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
+ dhp != NULL ? dhp->dofhp_addr : 0,
+ FALSE)) != 0) {
+ dtrace_dof_destroy(dof);
+ return rv;
+ }
+
+ /*
+ * Look for helper providers and validate their descriptions.
+ */
+ if (dhp != NULL) {
+ dt_dbg_dof(" DOF 0x%p Validating providers...\n", dof);
+
+ for (i = 0; i < dof->dofh_secnum; i++) {
+ dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
+ (daddr + dof->dofh_secoff +
+ i * dof->dofh_secsize);
+
+ if (sec->dofs_type != DOF_SECT_PROVIDER)
+ continue;
+
+ if (dtrace_helper_provider_validate(dof, sec) != 0) {
+ dtrace_enabling_destroy(enab);
+ dtrace_dof_destroy(dof);
+ return -1;
+ }
+
+ nprovs++;
+ }
+ }
+
+ /*
+ * Now we need to walk through the ECB descriptions in the enabling.
+ */
+ for (i = 0; i < enab->dten_ndesc; i++) {
+ dtrace_ecbdesc_t *ep = enab->dten_desc[i];
+ dtrace_probedesc_t *desc = &ep->dted_probe;
+
+ dt_dbg_dof(" ECB Desc %s:%s:%s:%s\n",
+ desc->dtpd_provider, desc->dtpd_mod,
+ desc->dtpd_func, desc->dtpd_name);
+ if (strcmp(desc->dtpd_provider, "dtrace") != 0)
+ continue;
+
+ if (strcmp(desc->dtpd_mod, "helper") != 0)
+ continue;
+
+ if (strcmp(desc->dtpd_func, "ustack") != 0)
+ continue;
+
+ if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
+ ep)) != 0) {
+ /*
+ * Adding this helper action failed -- we are now going
+ * to rip out the entire generation and return failure.
+ */
+ dtrace_helper_destroygen(dth->dthps_generation);
+ dtrace_enabling_destroy(enab);
+ dtrace_dof_destroy(dof);
+ return -1;
+ }
+
+ nhelpers++;
+ }
+
+ if (nhelpers < enab->dten_ndesc)
+ dtrace_dof_error(dof, "unmatched helpers");
+
+ gen = dth->dthps_generation++;
+ dtrace_enabling_destroy(enab);
+
+ if (dhp != NULL && nprovs > 0) {
+ dt_dbg_dof(" DOF 0x%p Adding and registering providers\n",
+ dof);
+
+ dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
+ if (dtrace_helper_provider_add(dhp, gen) == 0) {
+ mutex_unlock(&dtrace_lock);
+ dtrace_helper_provider_register(current, dth, dhp);
+ mutex_lock(&dtrace_lock);
+
+ destroy = 0;
+ }
+ }
+
+ if (destroy)
+ dtrace_dof_destroy(dof);
+
+ return gen;
+}
+
+void dtrace_helpers_destroy(struct task_struct *tsk)
+{
+ dtrace_helpers_t *help;
+ dtrace_vstate_t *vstate;
+ int i;
+
+ mutex_lock(&dtrace_lock);
+
+ ASSERT(tsk->dtrace_helpers != NULL);
+ ASSERT(dtrace_helpers > 0);
+
+ dt_dbg_dof("Helper cleanup: PID %d\n", tsk->pid);
+
+ help = tsk->dtrace_helpers;
+ vstate = &help->dthps_vstate;
+
+ /*
+ * We're now going to lose the help from this process.
+ */
+ tsk->dtrace_helpers = NULL;
+ dtrace_sync();
+
+ /*
+ * Destroy the helper actions.
+ */
+ for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
+ dtrace_helper_action_t *h, *next;
+
+ for (h = help->dthps_actions[i]; h != NULL; h = next) {
+ next = h->dtha_next;
+ dtrace_helper_action_destroy(h, vstate);
+ }
+ }
+
+ mutex_unlock(&dtrace_lock);
+
+ /*
+ * Destroy the helper providers.
+ */
+ if (help->dthps_maxprovs > 0) {
+ mutex_lock(&dtrace_meta_lock);
+ if (dtrace_meta_pid != NULL) {
+ ASSERT(dtrace_deferred_pid == NULL);
+
+ for (i = 0; i < help->dthps_nprovs; i++) {
+ dtrace_helper_provider_remove(
+ &help->dthps_provs[i]->dthp_prov,
+ tsk->pid);
+ }
+ } else {
+ mutex_lock(&dtrace_lock);
+ ASSERT(help->dthps_deferred == 0 ||
+ help->dthps_next != NULL ||
+ help->dthps_prev != NULL ||
+ help == dtrace_deferred_pid);
+
+ /*
+ * Remove the helper from the deferred list.
+ */
+ if (help->dthps_next != NULL)
+ help->dthps_next->dthps_prev = help->dthps_prev;
+ if (help->dthps_prev != NULL)
+ help->dthps_prev->dthps_next = help->dthps_next;
+ if (dtrace_deferred_pid == help) {
+ dtrace_deferred_pid = help->dthps_next;
+ ASSERT(help->dthps_prev == NULL);
+ }
+
+ mutex_unlock(&dtrace_lock);
+ }
+
+ mutex_unlock(&dtrace_meta_lock);
+
+ for (i = 0; i < help->dthps_nprovs; i++)
+ dtrace_helper_provider_destroy(help->dthps_provs[i]);
+
+ vfree(help->dthps_provs);
+ }
+
+ mutex_lock(&dtrace_lock);
+
+ dtrace_vstate_fini(&help->dthps_vstate);
+ vfree(help->dthps_actions);
+ vfree(help);
+
+ --dtrace_helpers;
+ mutex_unlock(&dtrace_lock);
+}
+
+int dtrace_helper_destroygen(int gen)
+{
+ struct task_struct *p = current;
+ dtrace_helpers_t *dth = p->dtrace_helpers;
+ dtrace_vstate_t *vstate;
+ int i;
+
+ ASSERT(MUTEX_HELD(&dtrace_lock));
+
+ if (dth == NULL || gen > dth->dthps_generation)
+ return -EINVAL;
+
+ vstate = &dth->dthps_vstate;
+
+ for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
+ dtrace_helper_action_t *last = NULL, *h, *next;
+
+ for (h = dth->dthps_actions[i]; h != NULL; h = next) {
+ next = h->dtha_next;
+
+ dt_dbg_dof(" Comparing action (agen %d vs rgen %d)\n",
+ h->dtha_generation, gen);
+
+ if (h->dtha_generation == gen) {
+ if (last != NULL)
+ last->dtha_next = next;
+ else
+ dth->dthps_actions[i] = next;
+
+ dtrace_helper_action_destroy(h, vstate);
+ } else
+ last = h;
+ }
+ }
+
+ /*
+ * Iterate until we've cleared out all helper providers with the given
+ * generation number.
+ */
+ for (;;) {
+ dtrace_helper_provider_t *prov = NULL;
+
+ /*
+ * Look for a helper provider with the right generation. We
+ * have to start back at the beginning of the list each time
+ * because we drop dtrace_lock. It's unlikely that we'll make
+ * more than two passes.
+ */
+ for (i = 0; i < dth->dthps_nprovs; i++) {
+ prov = dth->dthps_provs[i];
+
+ if (prov->dthp_generation == gen)
+ break;
+ }
+
+ /*
+ * If there were no matches, we are done.
+ */
+ if (i == dth->dthps_nprovs)
+ break;
+
+ dt_dbg_dof(" Found provider with gen %d\n", gen);
+
+ /*
+ * Move the last helper provider into this slot.
+ */
+ dth->dthps_nprovs--;
+ dth->dthps_provs[i] = dth->dthps_provs[dth->dthps_nprovs];
+ dth->dthps_provs[dth->dthps_nprovs] = NULL;
+
+ mutex_unlock(&dtrace_lock);
+
+ /*
+ * If we have a meta provider, remove this helper provider.
+ */
+ mutex_lock(&dtrace_meta_lock);
+
+ if (dtrace_meta_pid != NULL) {
+ ASSERT(dtrace_deferred_pid == NULL);
+
+ dtrace_helper_provider_remove(&prov->dthp_prov,
+ p->pid);
+ }
+
+ mutex_unlock(&dtrace_meta_lock);
+
+ dtrace_helper_provider_destroy(prov);
+
+ mutex_lock(&dtrace_lock);
+ }
+
+ return 0;
+}
+
+static void dtrace_helper_trace(dtrace_helper_action_t *helper,
+ dtrace_mstate_t *mstate,
+ dtrace_vstate_t *vstate, int where)
+{
+ uint32_t size, next, nnext, i;
+ dtrace_helptrace_t *ent;
+ uint16_t flags = this_cpu_core->cpuc_dtrace_flags;
+
+ if (!dtrace_helptrace_enabled)
+ return;
+
+ ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
+
+ /*
+ * What would a tracing framework be without its own tracing
+ * framework? (Well, a hell of a lot simpler, for starters...)
+ */
+ size = sizeof(dtrace_helptrace_t) + dtrace_helptrace_nlocals *
+ sizeof(uint64_t) - sizeof(uint64_t);
+
+ /*
+ * Iterate until we can allocate a slot in the trace buffer.
+ */
+ do {
+ next = dtrace_helptrace_next;
+
+ if (next + size < dtrace_helptrace_bufsize)
+ nnext = next + size;
+ else
+ nnext = size;
+ } while (cmpxchg(&dtrace_helptrace_next, next, nnext) != next);
+
+ /*
+ * We have our slot; fill it in.
+ */
+ if (nnext == size)
+ next = 0;
+
+ ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
+ ent->dtht_helper = helper;
+ ent->dtht_where = where;
+ ent->dtht_nlocals = vstate->dtvs_nlocals;
+
+ ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS)
+ ? mstate->dtms_fltoffs
+ : -1;
+ ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
+ ent->dtht_illval = this_cpu_core->cpuc_dtrace_illval;
+
+ for (i = 0; i < vstate->dtvs_nlocals; i++) {
+ dtrace_statvar_t *svar;
+
+ if ((svar = vstate->dtvs_locals[i]) == NULL)
+ continue;
+
+ ASSERT(svar->dtsv_size >= NR_CPUS * sizeof(uint64_t));
+ ent->dtht_locals[i] =
+ ((uint64_t *)(uintptr_t)svar->dtsv_data)[
+ smp_processor_id()];
+ }
+}
+
+uint64_t dtrace_helper(int which, dtrace_mstate_t *mstate,
+ dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
+{
+ uint16_t *flags = &this_cpu_core->cpuc_dtrace_flags;
+ uint64_t sarg0 = mstate->dtms_arg[0];
+ uint64_t sarg1 = mstate->dtms_arg[1];
+ uint64_t rval = 0;
+ dtrace_helpers_t *helpers = current->dtrace_helpers;
+ dtrace_helper_action_t *helper;
+ dtrace_vstate_t *vstate;
+ dtrace_difo_t *pred;
+ int i, trace = dtrace_helptrace_enabled;
+
+ ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
+
+ if (helpers == NULL)
+ return 0;
+
+ if ((helper = helpers->dthps_actions[which]) == NULL)
+ return 0;
+
+ vstate = &helpers->dthps_vstate;
+ mstate->dtms_arg[0] = arg0;
+ mstate->dtms_arg[1] = arg1;
+
+ /*
+ * Now iterate over each helper. If its predicate evaluates to 'true',
+ * we'll call the corresponding actions. Note that the below calls
+ * to dtrace_dif_emulate() may set faults in machine state. This is
+ * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
+ * the stored DIF offset with its own (which is the desired behavior).
+ * Also, note the calls to dtrace_dif_emulate() may allocate scratch
+ * from machine state; this is okay, too.
+ */
+ for (; helper != NULL; helper = helper->dtha_next) {
+ if ((pred = helper->dtha_predicate) != NULL) {
+ if (trace)
+ dtrace_helper_trace(helper, mstate, vstate, 0);
+
+ if (!dtrace_dif_emulate(pred, mstate, vstate, state))
+ goto next;
+
+ if (*flags & CPU_DTRACE_FAULT)
+ goto err;
+ }
+
+ for (i = 0; i < helper->dtha_nactions; i++) {
+ if (trace)
+ dtrace_helper_trace(helper, mstate, vstate,
+ i + 1);
+
+ rval = dtrace_dif_emulate(helper->dtha_actions[i],
+ mstate, vstate, state);
+
+ if (*flags & CPU_DTRACE_FAULT)
+ goto err;
+ }
+
+next:
+ if (trace)
+ dtrace_helper_trace(helper, mstate, vstate,
+ DTRACE_HELPTRACE_NEXT);
+ }
+
+ if (trace)
+ dtrace_helper_trace(helper, mstate, vstate,
+ DTRACE_HELPTRACE_DONE);
+
+ /*
+ * Restore the arg0 that we saved upon entry.
+ */
+ mstate->dtms_arg[0] = sarg0;
+ mstate->dtms_arg[1] = sarg1;
+
+ return rval;
+
+err:
+ if (trace)
+ dtrace_helper_trace(helper, mstate, vstate,
+ DTRACE_HELPTRACE_ERR);
+
+ /*
+ * Restore the arg0 that we saved upon entry.
+ */
+ mstate->dtms_arg[0] = sarg0;
+ mstate->dtms_arg[1] = sarg1;
+
+ return 0;
+}
+++ /dev/null
-/*
- * FILE: dtrace_helper.c
- * DESCRIPTION: Dynamic Tracing: helper functions
- *
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- * Copyright 2010, 2011 Oracle, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <linux/dtrace_cpu.h>
-
-#include "dtrace.h"
-
-static uint32_t dtrace_helptrace_next = 0;
-static uint32_t dtrace_helptrace_nlocals;
-static char *dtrace_helptrace_buffer;
-static int dtrace_helptrace_bufsize = 512 * 1024;
-
-#ifdef CONFIG_DT_DEBUG
-static int dtrace_helptrace_enabled = 1;
-#else
-static int dtrace_helptrace_enabled = 0;
-#endif
-
-static void dtrace_helper_trace(dtrace_helper_action_t *helper,
- dtrace_mstate_t *mstate,
- dtrace_vstate_t *vstate, int where)
-{
- uint32_t size, next, nnext, i;
- dtrace_helptrace_t *ent;
- uint16_t flags = this_cpu_core->cpuc_dtrace_flags;
-
- if (!dtrace_helptrace_enabled)
- return;
-
- ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
-
- /*
- * What would a tracing framework be without its own tracing
- * framework? (Well, a hell of a lot simpler, for starters...)
- */
- size = sizeof(dtrace_helptrace_t) + dtrace_helptrace_nlocals *
- sizeof(uint64_t) - sizeof(uint64_t);
-
- /*
- * Iterate until we can allocate a slot in the trace buffer.
- */
- do {
- next = dtrace_helptrace_next;
-
- if (next + size < dtrace_helptrace_bufsize)
- nnext = next + size;
- else
- nnext = size;
- } while (cmpxchg(&dtrace_helptrace_next, next, nnext) != next);
-
- /*
- * We have our slot; fill it in.
- */
- if (nnext == size)
- next = 0;
-
- ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
- ent->dtht_helper = helper;
- ent->dtht_where = where;
- ent->dtht_nlocals = vstate->dtvs_nlocals;
-
- ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS)
- ? mstate->dtms_fltoffs
- : -1;
- ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
- ent->dtht_illval = this_cpu_core->cpuc_dtrace_illval;
-
- for (i = 0; i < vstate->dtvs_nlocals; i++) {
- dtrace_statvar_t *svar;
-
- if ((svar = vstate->dtvs_locals[i]) == NULL)
- continue;
-
- ASSERT(svar->dtsv_size >= NR_CPUS * sizeof(uint64_t));
- ent->dtht_locals[i] =
- ((uint64_t *)(uintptr_t)svar->dtsv_data)[
- smp_processor_id()];
- }
-}
-
-uint64_t dtrace_helper(int which, dtrace_mstate_t *mstate,
- dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
-{
- uint16_t *flags = &this_cpu_core->cpuc_dtrace_flags;
- uint64_t sarg0 = mstate->dtms_arg[0];
- uint64_t sarg1 = mstate->dtms_arg[1];
- uint64_t rval = 0;
- dtrace_helpers_t *helpers = current->dtrace_helpers;
- dtrace_helper_action_t *helper;
- dtrace_vstate_t *vstate;
- dtrace_difo_t *pred;
- int i, trace = dtrace_helptrace_enabled;
-
- ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
-
- if (helpers == NULL)
- return 0;
-
- if ((helper = helpers->dthps_actions[which]) == NULL)
- return 0;
-
- vstate = &helpers->dthps_vstate;
- mstate->dtms_arg[0] = arg0;
- mstate->dtms_arg[1] = arg1;
-
- /*
- * Now iterate over each helper. If its predicate evaluates to 'true',
- * we'll call the corresponding actions. Note that the below calls
- * to dtrace_dif_emulate() may set faults in machine state. This is
- * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
- * the stored DIF offset with its own (which is the desired behavior).
- * Also, note the calls to dtrace_dif_emulate() may allocate scratch
- * from machine state; this is okay, too.
- */
- for (; helper != NULL; helper = helper->dtha_next) {
- if ((pred = helper->dtha_predicate) != NULL) {
- if (trace)
- dtrace_helper_trace(helper, mstate, vstate, 0);
-
- if (!dtrace_dif_emulate(pred, mstate, vstate, state))
- goto next;
-
- if (*flags & CPU_DTRACE_FAULT)
- goto err;
- }
-
- for (i = 0; i < helper->dtha_nactions; i++) {
- if (trace)
- dtrace_helper_trace(helper, mstate, vstate,
- i + 1);
-
- rval = dtrace_dif_emulate(helper->dtha_actions[i],
- mstate, vstate, state);
-
- if (*flags & CPU_DTRACE_FAULT)
- goto err;
- }
-
-next:
- if (trace)
- dtrace_helper_trace(helper, mstate, vstate,
- DTRACE_HELPTRACE_NEXT);
- }
-
- if (trace)
- dtrace_helper_trace(helper, mstate, vstate,
- DTRACE_HELPTRACE_DONE);
-
- /*
- * Restore the arg0 that we saved upon entry.
- */
- mstate->dtms_arg[0] = sarg0;
- mstate->dtms_arg[1] = sarg1;
-
- return rval;
-
-err:
- if (trace)
- dtrace_helper_trace(helper, mstate, vstate,
- DTRACE_HELPTRACE_ERR);
-
- /*
- * Restore the arg0 that we saved upon entry.
- */
- mstate->dtms_arg[0] = sarg0;
- mstate->dtms_arg[1] = sarg1;
-
- return 0;
-}
#include "dtrace.h"
dtrace_provider_t *dtrace_provider;
+dtrace_meta_t *dtrace_meta_pid;
+dtrace_helpers_t *dtrace_deferred_pid;
DEFINE_MUTEX(dtrace_lock);
DEFINE_MUTEX(dtrace_provider_lock);
* be called by providers during module initialization.
*/
int dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
- cred_t *cr, const dtrace_pops_t *pops, void *arg,
+ const cred_t *cr, const dtrace_pops_t *pops, void *arg,
dtrace_provider_id_t *idp)
{
dtrace_provider_t *provider;
}
/*
- * Remove the given probe from the hash tables and the probe IDR. The probes
- * are chained for further processing.
+ * Remove the given probe from the hash tables and the probe IDR, if it is
+ * associated with the given provider. The probes are chained for further
+ * processing.
*/
static int dtrace_unregister_probe(int id, void *p, void *data)
{
return 0;
}
+/*
+ * Remove the given probe from the hash tables and the probe IDR, if it is
+ * associated with the given provider and if it does not have any enablings.
+ * The probes are chained for further processing.
+ */
+static int dtrace_condense_probe(int id, void *p, void *data)
+{
+ dtrace_probe_t *probe = (dtrace_probe_t *)p;
+ struct unreg_state *st = (struct unreg_state *)data;
+
+ if (probe->dtpr_provider != st->prov)
+ return 0;
+
+ if (probe->dtpr_ecb != NULL)
+ return 0;
+
+ dtrace_hash_remove(dtrace_bymod, probe);
+ dtrace_hash_remove(dtrace_byfunc, probe);
+ dtrace_hash_remove(dtrace_byname, probe);
+
+ if (st->first == NULL) {
+ st->first = probe;
+ probe->dtpr_nextmod = NULL;
+ } else {
+ probe->dtpr_nextmod = st->first;
+ st->first = probe;
+ }
+
+ return 0;
+}
+
/*
* Unregister the specified provider from the DTrace core. This should be
* called by provider during module cleanup.
int dtrace_condense(dtrace_provider_id_t id)
{
dtrace_provider_t *prov = (dtrace_provider_t *)id;
- int i;
dtrace_probe_t *probe;
+ struct unreg_state st = {
+ prov,
+ NULL
+ };
/*
* Make sure this isn't the DTrace provider itself.
mutex_lock(&dtrace_provider_lock);
mutex_lock(&dtrace_lock);
- /* FIXME - INCOMPLETE */
+ /*
+ * Attempt to destroy the probes associated with this provider.
+ */
+ dtrace_probe_for_each(dtrace_condense_probe, &st);
+
+ /*
+ * The probes associated with the provider have been removed. Ensure
+ * synchronization on probe IDR processing.
+ */
+ dtrace_sync();
+
+ /*
+ * Now get rid of the actual probes.
+ */
+ for (probe = st.first; probe != NULL; probe = st.first) {
+ int probe_id = probe->dtpr_id;
+
+ st.first = probe->dtpr_nextmod;
+
+ prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe_id,
+ probe->dtpr_arg);
+
+ kfree(probe->dtpr_mod);
+ kfree(probe->dtpr_func);
+ kfree(probe->dtpr_name);
+ kfree(probe);
+
+ dtrace_probe_remove_id(probe_id);
+ }
mutex_unlock(&dtrace_lock);
mutex_unlock(&dtrace_provider_lock);
return 0;
}
EXPORT_SYMBOL(dtrace_condense);
+
+int dtrace_meta_register(const char *name, const dtrace_mops_t *mops,
+ void *arg, dtrace_meta_provider_id_t *idp)
+{
+ dtrace_meta_t *meta;
+ dtrace_helpers_t *help, *next;
+ int i;
+
+ *idp = DTRACE_METAPROVNONE;
+
+ /*
+ * We strictly don't need the name, but we hold onto it for
+ * debuggability. All hail error queues!
+ */
+ if (name == NULL) {
+ pr_warn("failed to register meta-provider: invalid name\n");
+ return -EINVAL;
+ }
+
+ if (mops == NULL ||
+ mops->dtms_create_probe == NULL ||
+ mops->dtms_provide_pid == NULL ||
+ mops->dtms_remove_pid == NULL) {
+ pr_warn("failed to register meta-register %s: invalid ops\n",
+ name);
+ return -EINVAL;
+ }
+
+ meta = kzalloc(sizeof(dtrace_meta_t), GFP_KERNEL);
+ meta->dtm_mops = *mops;
+ meta->dtm_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ strcpy(meta->dtm_name, name);
+ meta->dtm_arg = arg;
+
+ mutex_lock(&dtrace_meta_lock);
+ mutex_lock(&dtrace_lock);
+
+ if (dtrace_meta_pid != NULL) {
+ mutex_unlock(&dtrace_lock);
+ mutex_unlock(&dtrace_meta_lock);
+ pr_warn("failed to register meta-register %s: user-land "
+ "meta-provider exists", name);
+ kfree(meta->dtm_name);
+ kfree(meta);
+ return -EINVAL;
+ }
+
+ dtrace_meta_pid = meta;
+ *idp = (dtrace_meta_provider_id_t)meta;
+
+ /*
+ * If there are providers and probes ready to go, pass them
+ * off to the new meta provider now.
+ */
+ help = dtrace_deferred_pid;
+ dtrace_deferred_pid = NULL;
+
+ mutex_unlock(&dtrace_lock);
+
+ while (help != NULL) {
+ for (i = 0; i < help->dthps_nprovs; i++) {
+ dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
+ help->dthps_pid);
+ }
+
+ next = help->dthps_next;
+ help->dthps_next = NULL;
+ help->dthps_prev = NULL;
+ help->dthps_deferred = 0;
+ help = next;
+ }
+
+ mutex_unlock(&dtrace_meta_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(dtrace_meta_register);
+
+int dtrace_meta_unregister(dtrace_meta_provider_id_t id)
+{
+ dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
+
+ mutex_lock(&dtrace_meta_lock);
+ mutex_lock(&dtrace_lock);
+
+ if (old == dtrace_meta_pid) {
+ pp = &dtrace_meta_pid;
+ } else {
+ pr_err("Attempt to unregister non-existent DTrace meta-"
+ "provider %p\n", (void *)old);
+ BUG();
+ }
+
+ if (old->dtm_count != 0) {
+ mutex_unlock(&dtrace_lock);
+ mutex_unlock(&dtrace_meta_lock);
+ return -EBUSY;
+ }
+
+ *pp = NULL;
+
+ mutex_unlock(&dtrace_lock);
+ mutex_unlock(&dtrace_meta_lock);
+
+ kfree(old->dtm_name);
+ kfree(old);
+
+ return 0;
+}
+EXPORT_SYMBOL(dtrace_meta_unregister);
+++ /dev/null
-#ifndef _FASTTRAP_H_
-#define _FASTTRAP_H_
-
-extern int fasttrap_dev_init(void);
-extern void fasttrap_dev_exit(void);
-
-#endif /* _FASTTRAP_H_ */
*
* CDDL HEADER END
*
- * Copyright 2010, 2011 Oracle, Inc. All rights reserved.
+ * Copyright 2010, 2011, 2012, 2013 Oracle, Inc. All rights reserved.
* Use is subject to license terms.
*/
+#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/workqueue.h>
#include "dtrace.h"
#include "dtrace_dev.h"
+#include "fasttrap_impl.h"
+
+#define FASTTRAP_MAX_DEFAULT 250000
+static uint32_t fasttrap_max;
+static atomic_t fasttrap_total;
+
+#define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
+#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
+#define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
+
+#define FASTTRAP_PID_NAME "pid"
+
+fasttrap_hash_t fasttrap_tpoints;
+static fasttrap_hash_t fasttrap_provs;
+static fasttrap_hash_t fasttrap_procs;
+
+#define FASTTRAP_PROVS_INDEX(pid, name) \
+ ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
+#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
+
+#define CLEANUP_NONE 0
+#define CLEANUP_SCHEDULED 1
+#define CLEANUP_DEFERRED 2
+
+DEFINE_MUTEX(fasttrap_cleanup_mtx);
+static uint_t fasttrap_cleanup_state;
+static uint_t fasttrap_cleanup_work;
+
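+/*
+ * Provider retirement is expected to run asynchronously:
+ * fasttrap_cleanup_state records whether a cleanup pass is idle,
+ * scheduled, or needs to be re-run (CLEANUP_NONE/SCHEDULED/DEFERRED),
+ * and fasttrap_cleanup_work flags pending work.  The cleanup work item
+ * itself is not part of this hunk.
+ */
+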
+static void fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
+{
+ /*
+ * There are no "default" pid probes.
+ */
+}
+
+static int
+fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
+{
+ return 0; /* FIXME */
+}
+
+static void fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
+{
+ /* FIXME */
+}
+
+static void fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
+ dtrace_argdesc_t *desc)
+{
+ /* FIXME */
+}
+
+static uint64_t fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg,
+ int argno, int aframes)
+{
+ return 0; /* FIXME */
+}
+
+static void fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
+{
+ /* FIXME */
+}
+
+static const dtrace_pattr_t pid_attr = {
+{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
+{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
+};
+
+static dtrace_pops_t pid_pops = {
+#ifdef FIXME
+ fasttrap_pid_provide,
+ NULL,
+ fasttrap_pid_enable,
+ fasttrap_pid_disable,
+ NULL,
+ NULL,
+ fasttrap_pid_getargdesc,
+ fasttrap_pid_getarg,
+ NULL,
+ fasttrap_pid_destroy
+#endif
+};
+
+static dtrace_pops_t usdt_pops = {
+ fasttrap_pid_provide,
+ NULL,
+ fasttrap_pid_enable,
+ fasttrap_pid_disable,
+ NULL,
+ NULL,
+ fasttrap_pid_getargdesc,
+ fasttrap_usdt_getarg,
+ NULL,
+ fasttrap_pid_destroy
+};
+
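+/*
+ * Classic ELF-style string hash, used to spread names across the hash
+ * tables below.
+ */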
+static uint_t fasttrap_hash_str(const char *p)
+{
+ unsigned int g;
+ uint_t hval = 0;
+
+ while (*p) {
+ hval = (hval << 4) + *p++;
+ if ((g = (hval & 0xf0000000)) != 0)
+ hval ^= g >> 24;
+ hval &= ~g;
+ }
+
+ return hval;
+}
+
+static int fasttrap_uint32_cmp(const void *ap, const void *bp)
+{
+ const uint32_t a = *(const uint32_t *)ap, b = *(const uint32_t *)bp;
+
+ /* Compare explicitly; the unsigned difference may overflow int. */
+ return (a < b) ? -1 : (a > b);
+}
+
+static int fasttrap_uint64_cmp(const void *ap, const void *bp)
+{
+ const uint64_t a = *(const uint64_t *)ap, b = *(const uint64_t *)bp;
+
+ return (a < b) ? -1 : (a > b);
+}
+
+void fasttrap_meta_create_probe(void *arg, void *parg,
+ dtrace_helper_probedesc_t *dhpb)
+{
+ fasttrap_provider_t *provider = parg;
+ fasttrap_probe_t *pp;
+ fasttrap_tracepoint_t *tp;
+ int i, j;
+ uint32_t ntps;
+
+ /*
+ * Since the meta provider count is non-zero we don't have to worry
+ * about this provider disappearing.
+ */
+ ASSERT(provider->ftp_mcount > 0);
+
+ /*
+ * The offsets must be unique.
+ */
+ sort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof(uint32_t),
+ fasttrap_uint32_cmp, NULL);
+ for (i = 1; i < dhpb->dthpb_noffs; i++) {
+ if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
+ dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
+ return;
+ }
+
+ sort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof(uint32_t),
+ fasttrap_uint32_cmp, NULL);
+ for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
+ if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
+ dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
+ return;
+ }
+
+ /*
+ * Grab the creation lock to ensure consistency between calls to
+ * dtrace_probe_lookup() and dtrace_probe_create() in the face of
+ * other threads creating probes.
+ */
+ mutex_lock(&provider->ftp_cmtx);
+
+ if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
+ dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
+ mutex_unlock(&provider->ftp_cmtx);
+ return;
+ }
+
+ ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
+ ASSERT(ntps > 0);
+
+ atomic_add(ntps, &fasttrap_total);
+
+ if (atomic_read(&fasttrap_total) > fasttrap_max) {
+ atomic_add(-ntps, &fasttrap_total);
+ mutex_unlock(&provider->ftp_cmtx);
+ return;
+ }
+
+ pp = kzalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), GFP_KERNEL);
+
+ pp->ftp_prov = provider;
+ pp->ftp_pid = provider->ftp_pid;
+ pp->ftp_ntps = ntps;
+ pp->ftp_nargs = dhpb->dthpb_xargc;
+ pp->ftp_xtypes = dhpb->dthpb_xtypes;
+ pp->ftp_ntypes = dhpb->dthpb_ntypes;
+
+ /*
+ * First create a tracepoint for each actual point of interest.
+ */
+ for (i = 0; i < dhpb->dthpb_noffs; i++) {
+ tp = kzalloc(sizeof(fasttrap_tracepoint_t), GFP_KERNEL);
+
+ tp->ftt_proc = provider->ftp_proc;
+ tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
+ tp->ftt_pid = provider->ftp_pid;
+
+ pp->ftp_tps[i].fit_tp = tp;
+ pp->ftp_tps[i].fit_id.fti_probe = pp;
+#ifdef __sparc
+ pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
+#else
+ pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
+#endif
+ }
+
+ /*
+ * Then create a tracepoint for each is-enabled point.
+ */
+ for (j = 0; i < ntps; i++, j++) {
+ tp = kzalloc(sizeof(fasttrap_tracepoint_t), GFP_KERNEL);
+
+ tp->ftt_proc = provider->ftp_proc;
+ tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
+ tp->ftt_pid = provider->ftp_pid;
+
+ pp->ftp_tps[i].fit_tp = tp;
+ pp->ftp_tps[i].fit_id.fti_probe = pp;
+ pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
+ }
+
+ /*
+ * If the arguments are shuffled around we set the argument remapping
+ * table. Later, when the probe fires, we only remap the arguments
+ * if the table is non-NULL.
+ */
+ for (i = 0; i < dhpb->dthpb_xargc; i++) {
+ if (dhpb->dthpb_args[i] != i) {
+ pp->ftp_argmap = dhpb->dthpb_args;
+ break;
+ }
+ }
+
+ /*
+ * The probe is fully constructed -- register it with DTrace.
+ */
+ pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
+ dhpb->dthpb_func, dhpb->dthpb_name,
+ FASTTRAP_OFFSET_AFRAMES, pp);
+
+ mutex_unlock(&provider->ftp_cmtx);
+}
+
+static void fasttrap_proc_release(fasttrap_proc_t *proc)
+{
+ fasttrap_bucket_t *bucket;
+ fasttrap_proc_t *fprc, **fprcp;
+ pid_t pid = proc->ftpc_pid;
+
+ mutex_lock(&proc->ftpc_mtx);
+
+ ASSERT(proc->ftpc_rcount != 0);
+ ASSERT(atomic64_read(&proc->ftpc_acount) <= proc->ftpc_rcount);
+
+ if (--proc->ftpc_rcount != 0) {
+ mutex_unlock(&proc->ftpc_mtx);
+ return;
+ }
+
+ mutex_unlock(&proc->ftpc_mtx);
+
+ /*
+ * There should definitely be no live providers associated with this
+ * process at this point.
+ */
+ ASSERT(atomic64_read(&proc->ftpc_acount) == 0);
+
+ bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
+ mutex_lock(&bucket->ftb_mtx);
+
+ fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
+ while ((fprc = *fprcp) != NULL) {
+ if (fprc == proc)
+ break;
+
+ fprcp = &fprc->ftpc_next;
+ }
+
+ /*
+ * Something strange has happened if we can't find the proc.
+ */
+ ASSERT(fprc != NULL);
+
+ *fprcp = fprc->ftpc_next;
+
+ mutex_unlock(&bucket->ftb_mtx);
+
+ kfree(fprc);
+}
+
+static void fasttrap_provider_free(fasttrap_provider_t *provider)
+{
+ pid_t pid = provider->ftp_pid;
+
+ /*
+ * There need to be no associated enabled probes, no consumers
+ * creating probes, and no meta providers referencing this provider.
+ */
+ ASSERT(provider->ftp_rcount == 0);
+ ASSERT(provider->ftp_ccount == 0);
+ ASSERT(provider->ftp_mcount == 0);
+
+ /*
+ * If this provider hasn't been retired, we need to explicitly drop the
+ * count of active providers on the associated process structure.
+ */
+ if (!provider->ftp_retired) {
+ atomic64_dec(&provider->ftp_proc->ftpc_acount);
+ ASSERT(atomic64_read(&provider->ftp_proc->ftpc_acount) <
+ provider->ftp_proc->ftpc_rcount);
+ }
+
+ fasttrap_proc_release(provider->ftp_proc);
+
+ kfree(provider);
+
+ unregister_pid_provider(pid);
+}
+
+static fasttrap_proc_t *fasttrap_proc_lookup(pid_t pid)
+{
+ fasttrap_bucket_t *bucket;
+ fasttrap_proc_t *fprc, *new_fprc;
+
+ bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
+ mutex_lock(&bucket->ftb_mtx);
+
+ for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
+ if (fprc->ftpc_pid == pid &&
+ atomic64_read(&fprc->ftpc_acount) != 0) {
+ mutex_lock(&fprc->ftpc_mtx);
+ mutex_unlock(&bucket->ftb_mtx);
+ fprc->ftpc_rcount++;
+ atomic64_inc(&fprc->ftpc_acount);
+ ASSERT(atomic64_read(&fprc->ftpc_acount) <=
+ fprc->ftpc_rcount);
+ mutex_unlock(&fprc->ftpc_mtx);
+
+ return fprc;
+ }
+ }
+
+ /*
+ * Drop the bucket lock so we don't try to perform a sleeping
+ * allocation under it.
+ */
+ mutex_unlock(&bucket->ftb_mtx);
+
+ new_fprc = kzalloc(sizeof(fasttrap_proc_t), GFP_KERNEL);
+ new_fprc->ftpc_pid = pid;
+ new_fprc->ftpc_rcount = 1;
+ atomic64_set(&new_fprc->ftpc_acount, 1);
+ mutex_init(&new_fprc->ftpc_mtx);
+
+ mutex_lock(&bucket->ftb_mtx);
+
+ /*
+ * Take another lap through the list to make sure a proc hasn't
+ * been created for this pid while we weren't under the bucket lock.
+ */
+ for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
+ if (fprc->ftpc_pid == pid &&
+ atomic64_read(&fprc->ftpc_acount) != 0) {
+ mutex_lock(&fprc->ftpc_mtx);
+ mutex_unlock(&bucket->ftb_mtx);
+ fprc->ftpc_rcount++;
+ atomic64_inc(&fprc->ftpc_acount);
+ ASSERT(atomic64_read(&fprc->ftpc_acount) <=
+ fprc->ftpc_rcount);
+ mutex_unlock(&fprc->ftpc_mtx);
+
+ kfree(new_fprc);
+
+ return fprc;
+ }
+ }
+
+ new_fprc->ftpc_next = bucket->ftb_data;
+ bucket->ftb_data = new_fprc;
+
+ mutex_unlock(&bucket->ftb_mtx);
+
+ return new_fprc;
+}
+
+/*
+ * Look up a fasttrap-managed provider based on its name and associated
+ * pid. If the provider does not yet exist, it is instantiated using the
+ * pa attributes (which must be non-NULL); on failure NULL is returned.
+ * The provider is returned with its lock held.
+ */
+static fasttrap_provider_t *fasttrap_provider_lookup(pid_t pid,
+ const char *name,
+ const dtrace_pattr_t *pa)
+{
+ fasttrap_provider_t *fp, *new_fp = NULL;
+ fasttrap_bucket_t *bucket;
+ char provname[DTRACE_PROVNAMELEN];
+ struct task_struct *p;
+ const cred_t *cred;
+
+ ASSERT(strlen(name) < sizeof (fp->ftp_name));
+ ASSERT(pa != NULL);
+
+ bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
+ mutex_lock(&bucket->ftb_mtx);
+
+ /*
+ * Take a lap through the list and return the match if we find it.
+ */
+ for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
+ if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
+ !fp->ftp_retired) {
+ mutex_lock(&fp->ftp_mtx);
+ mutex_unlock(&bucket->ftb_mtx);
+ return fp;
+ }
+ }
+
+ /*
+ * Drop the bucket lock so we don't try to perform a sleeping
+ * allocation under it.
+ */
+ mutex_unlock(&bucket->ftb_mtx);
+
+ if ((p = register_pid_provider(pid)) == NULL)
+ return NULL;
+
+ /*
+ * Grab the credentials for this process so we have
+ * something to pass to dtrace_register().
+ */
+ cred = get_cred(p->cred);
+
+ new_fp = kzalloc(sizeof(fasttrap_provider_t), GFP_KERNEL);
+ new_fp->ftp_pid = pid;
+ new_fp->ftp_proc = fasttrap_proc_lookup(pid);
+ mutex_init(&new_fp->ftp_mtx);
+ mutex_init(&new_fp->ftp_cmtx);
+
+ ASSERT(new_fp->ftp_proc != NULL);
+
+ mutex_lock(&bucket->ftb_mtx);
+
+ /*
+ * Take another lap through the list to make sure a provider hasn't
+ * been created for this pid while we weren't under the bucket lock.
+ */
+ for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
+ if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
+ !fp->ftp_retired) {
+ mutex_lock(&fp->ftp_mtx);
+ mutex_unlock(&bucket->ftb_mtx);
+ fasttrap_provider_free(new_fp);
+ put_cred(cred);
+ return fp;
+ }
+ }
+
+ strcpy(new_fp->ftp_name, name);
+
+ /*
+ * Fail and return NULL if either the provider name is too long
+ * or we fail to register this new provider with the DTrace
+ * framework. Note that this is the only place we ever construct
+ * the full provider name -- we keep it in pieces in the provider
+ * structure.
+ */
+ if (snprintf(provname, sizeof(provname), "%s%u", name, (uint_t)pid) >=
+ sizeof(provname) ||
+ dtrace_register(provname, pa,
+ DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER, cred,
+ pa == &pid_attr ? &pid_pops : &usdt_pops,
+ new_fp, &new_fp->ftp_provid) != 0) {
+ mutex_unlock(&bucket->ftb_mtx);
+ fasttrap_provider_free(new_fp);
+ put_cred(cred);
+ return NULL;
+ }
+
+ new_fp->ftp_next = bucket->ftb_data;
+ bucket->ftb_data = new_fp;
+
+ mutex_lock(&new_fp->ftp_mtx);
+ mutex_unlock(&bucket->ftb_mtx);
+
+ put_cred(cred);
+ return new_fp;
+}
+
+void *fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv,
+ pid_t pid)
+{
+ fasttrap_provider_t *provider;
+
+ if (strlen(dhpv->dthpv_provname) + 10 >= sizeof(provider->ftp_name)) {
+ pr_warn("Failed to instantiate provider %s: name too long "
+ "to accommodate pid\n", dhpv->dthpv_provname);
+ return NULL;
+ }
+
+ /*
+ * Don't let folks spoof the true pid provider.
+ */
+ if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
+ pr_warn("Failed to instantiate provider %s: %s is an invalid "
+ "name\n", dhpv->dthpv_provname, FASTTRAP_PID_NAME);
+ return NULL;
+ }
+
+ /*
+ * The highest stability class that fasttrap supports is ISA; cap
+ * the stability of the new provider accordingly.
+ */
+ if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
+ dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
+ if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
+ dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
+ if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
+ dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
+ if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
+ dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
+ if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
+ dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
+
+ provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
+ &dhpv->dthpv_pattr);
+ if (provider == NULL) {
+ pr_warn("Failed to instantiate provider %s for process %u\n",
+ dhpv->dthpv_provname, (uint_t)pid);
+ return NULL;
+ }
+
+ /*
+ * Up the meta provider count so this provider isn't removed until the
+ * meta provider has been told to remove it.
+ */
+ provider->ftp_mcount++;
+
+ mutex_unlock(&provider->ftp_mtx);
+
+ return provider;
+}
+
+static void fasttrap_pid_cleanup_cb(struct work_struct *work)
+{
+ fasttrap_provider_t **fpp, *fp;
+ fasttrap_bucket_t *bucket;
+ dtrace_provider_id_t provid;
+ int i, later = 0;
+
+ static volatile int in = 0;
+
+ ASSERT(in == 0);
+ in = 1;
+
+ mutex_lock(&fasttrap_cleanup_mtx);
+ while (fasttrap_cleanup_work) {
+ fasttrap_cleanup_work = 0;
+ mutex_unlock(&fasttrap_cleanup_mtx);
+
+ later = 0;
+
+ /*
+ * Iterate over all the providers trying to remove the marked
+ * ones. If a provider is marked but not retired, we just
+ * have to take a crack at removing it -- it's no big deal if
+ * we can't.
+ */
+ for (i = 0; i < fasttrap_provs.fth_nent; i++) {
+ bucket = &fasttrap_provs.fth_table[i];
+ mutex_lock(&bucket->ftb_mtx);
+ fpp = (fasttrap_provider_t **)&bucket->ftb_data;
+
+ while ((fp = *fpp) != NULL) {
+ if (!fp->ftp_marked) {
+ fpp = &fp->ftp_next;
+ continue;
+ }
+
+ mutex_lock(&fp->ftp_mtx);
+
+ /*
+ * If this provider has consumers actively
+ * creating probes (ftp_ccount) or is a USDT
+ * provider (ftp_mcount), we can't unregister
+ * or even condense.
+ */
+ if (fp->ftp_ccount != 0 ||
+ fp->ftp_mcount != 0) {
+ mutex_unlock(&fp->ftp_mtx);
+ fp->ftp_marked = 0;
+ continue;
+ }
+
+ if (!fp->ftp_retired || fp->ftp_rcount != 0)
+ fp->ftp_marked = 0;
+
+ mutex_unlock(&fp->ftp_mtx);
+
+ /*
+ * If we successfully unregister this
+ * provider we can remove it from the hash
+ * chain and free the memory. If our attempt
+ * to unregister fails and this is a retired
+ * provider, increment our flag to try again
+ * pretty soon. If we've consumed more than
+ * half of our total permitted number of
+ * probes call dtrace_condense() to try to
+ * clean out the unenabled probes.
+ */
+ provid = fp->ftp_provid;
+ if (dtrace_unregister(provid) != 0) {
+ if (atomic_read(&fasttrap_total) >
+ fasttrap_max / 2)
+ dtrace_condense(provid);
+ later += fp->ftp_marked;
+ fpp = &fp->ftp_next;
+ } else {
+ *fpp = fp->ftp_next;
+ fasttrap_provider_free(fp);
+ }
+ }
+
+ mutex_unlock(&bucket->ftb_mtx);
+ }
+
+ mutex_lock(&fasttrap_cleanup_mtx);
+ }
+
+ ASSERT(fasttrap_cleanup_state != CLEANUP_NONE);
+
+ /*
+ * If we were unable to remove a retired provider, try again after
+ * a second. This situation can occur in certain circumstances where
+ * providers cannot be unregistered even though they have no probes
+ * enabled because of an execution of dtrace -l or something similar.
+ * If rescheduling has been disabled (state set to CLEANUP_DEFERRED
+ * because we're trying to detach), we set fasttrap_cleanup_work to
+ * ensure that we'll get a chance to do that work if and when the
+ * cleanup mechanism is reenabled (if detach fails).
+ */
+ if (later > 0 && fasttrap_cleanup_state != CLEANUP_DEFERRED) {
+ struct delayed_work *dw = container_of(work,
+ struct delayed_work,
+ work);
+
+ fasttrap_cleanup_state = CLEANUP_SCHEDULED;
+ schedule_delayed_work(dw, HZ);
+ } else if (later > 0) {
+ fasttrap_cleanup_work = 1;
+ } else {
+ fasttrap_cleanup_state = CLEANUP_NONE;
+ }
+
+ mutex_unlock(&fasttrap_cleanup_mtx);
+ in = 0;
+}
+
+static DECLARE_DELAYED_WORK(fasttrap_cleanup, fasttrap_pid_cleanup_cb);
+
+/*
+ * Activate the asynchronous cleanup mechanism.
+ */
+static void fasttrap_pid_cleanup(void)
+{
+ mutex_lock(&fasttrap_cleanup_mtx);
+ fasttrap_cleanup_work = 1;
+ fasttrap_cleanup_state = CLEANUP_SCHEDULED;
+ schedule_delayed_work(&fasttrap_cleanup, 3);
+ mutex_unlock(&fasttrap_cleanup_mtx);
+}
+
+void fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
+{
+ fasttrap_provider_t *fp;
+ fasttrap_bucket_t *bucket;
+ dtrace_provider_id_t provid;
+
+ ASSERT(strlen(name) < sizeof (fp->ftp_name));
+
+ bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
+ mutex_lock(&bucket->ftb_mtx);
+
+ for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
+ if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
+ !fp->ftp_retired)
+ break;
+ }
+
+ if (fp == NULL) {
+ mutex_unlock(&bucket->ftb_mtx);
+ return;
+ }
+
+ mutex_lock(&fp->ftp_mtx);
+ ASSERT(!mprov || fp->ftp_mcount > 0);
+ if (mprov && --fp->ftp_mcount != 0) {
+ mutex_unlock(&fp->ftp_mtx);
+ mutex_unlock(&bucket->ftb_mtx);
+ return;
+ }
+
+ /*
+ * Mark the provider to be removed in our post-processing step, mark it
+ * retired, and drop the active count on its proc. Marking it indicates
+ * that we should try to remove it; setting the retired flag indicates
+ * that we're done with this provider; dropping the active count on the
+ * proc releases our hold, and when this reaches zero (as it will
+ * during exit or exec) the proc and associated providers become
+ * defunct.
+ *
+ * We obviously need to take the bucket lock before the provider lock
+ * to perform the lookup, but we need to drop the provider lock
+ * before calling into the DTrace framework since we acquire the
+ * provider lock in callbacks invoked from the DTrace framework. The
+ * bucket lock therefore protects the integrity of the provider hash
+ * table.
+ */
+ atomic64_dec(&fp->ftp_proc->ftpc_acount);
+ ASSERT(atomic64_read(&fp->ftp_proc->ftpc_acount) <
+ fp->ftp_proc->ftpc_rcount);
+
+ fp->ftp_retired = 1;
+ fp->ftp_marked = 1;
+ provid = fp->ftp_provid;
+ mutex_unlock(&fp->ftp_mtx);
+
+ /*
+ * We don't have to worry about invalidating the same provider twice
+ * since fasttrap_provider_lookup() ignores providers that have been
+ * marked as retired.
+ */
+ dtrace_invalidate(provid);
+
+ mutex_unlock(&bucket->ftb_mtx);
+
+ fasttrap_pid_cleanup();
+}
+
+void fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
+{
+ /*
+ * Clean up the USDT provider. There may be active consumers of the
+ * provider busy adding probes, no damage will actually befall the
+ * provider until that count has dropped to zero. This just puts
+ * the provider on death row.
+ */
+ fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
+}
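+
+/*
+ * A note on the meta-provider lifecycle as wired up through these entry
+ * points: the DTrace core calls fasttrap_meta_provide() to instantiate a
+ * USDT provider for a pid, fasttrap_meta_create_probe() once per helper
+ * probe description, and fasttrap_meta_remove() when the process goes
+ * away; retirement then defers the actual teardown to the work scheduled
+ * by fasttrap_pid_cleanup().
+ */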
static long fasttrap_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
.fops = &fasttrap_fops,
};
+static void fasttrap_init_htable(fasttrap_hash_t *fth, ulong_t nent)
+{
+ ulong_t i;
+
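+ /*
+ * Round the entry count up to a power of two so that masking with
+ * fth_mask selects a bucket; e.g. nent = 250 yields 256 buckets.
+ */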
+ if ((nent & (nent - 1)) == 0)
+ fth->fth_nent = nent;
+ else
+ fth->fth_nent = 1 << fls(nent);
+
+ ASSERT(fth->fth_nent > 0);
+
+ fth->fth_mask = fth->fth_nent - 1;
+ fth->fth_table = kzalloc(fth->fth_nent * sizeof(fasttrap_bucket_t),
+ GFP_KERNEL);
+
+ for (i = 0; i < fth->fth_nent; i++)
+ mutex_init(&fth->fth_table[i].ftb_mtx);
+}
+
int fasttrap_dev_init(void)
{
- int ret = 0;
+ int ret = 0;
+ ulong_t nent;
ret = misc_register(&fasttrap_dev);
- if (ret)
+ if (ret) {
pr_err("%s: Can't register misc device %d\n",
fasttrap_dev.name, fasttrap_dev.minor);
+ goto fail;
+ }
+
+#ifdef FIXME
+ dtrace_fasttrap_fork_ptr = &fasttrap_fork;
+ dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
+ dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
+#endif
+
+ fasttrap_max = FASTTRAP_MAX_DEFAULT;
+ atomic_set(&fasttrap_total, 0);
+
+ /*
+ * Conjure up the tracepoints hashtable...
+ */
+ nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
+
+ fasttrap_init_htable(&fasttrap_tpoints, nent);
+
+ /*
+ * ... and the providers hash table...
+ */
+ nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
+ fasttrap_init_htable(&fasttrap_provs, nent);
+
+ /*
+ * ... and the procs hash table.
+ */
+ nent = FASTTRAP_PROCS_DEFAULT_SIZE;
+ fasttrap_init_htable(&fasttrap_procs, nent);
+
+fail:
return ret;
}
void fasttrap_dev_exit(void)
{
+ int fail = 0;
+ ulong_t i;
+
+ /*
+ * Prevent any new cleanup work from being scheduled by moving the
+ * cleanup state to CLEANUP_DEFERRED, and wait for any work that is
+ * currently running to complete.
+ */
+ mutex_lock(&fasttrap_cleanup_mtx);
+ fasttrap_cleanup_work = 0;
+
+ while (fasttrap_cleanup_state != CLEANUP_DEFERRED) {
+ uint_t tmp;
+
+ tmp = fasttrap_cleanup_state;
+ fasttrap_cleanup_state = CLEANUP_DEFERRED;
+
+ if (tmp != CLEANUP_NONE) {
+ mutex_unlock(&fasttrap_cleanup_mtx);
+ flush_delayed_work_sync(&fasttrap_cleanup);
+ mutex_lock(&fasttrap_cleanup_mtx);
+ }
+ }
+
+ fasttrap_cleanup_work = 0;
+ mutex_unlock(&fasttrap_cleanup_mtx);
+
+ /*
+ * Iterate over all of our providers. If there's still a process
+ * that corresponds to that pid, fail to detach.
+ */
+ for (i = 0; i < fasttrap_provs.fth_nent; i++) {
+ fasttrap_provider_t **fpp, *fp;
+ fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];
+
+ mutex_lock(&bucket->ftb_mtx);
+ fpp = (fasttrap_provider_t **)&bucket->ftb_data;
+ while ((fp = *fpp) != NULL) {
+ /*
+ * Acquire and release the lock as a simple way of
+ * waiting for any other consumer to finish with
+ * this provider. A thread must first acquire the
+ * bucket lock so there's no chance of another thread
+ * blocking on the provider's lock.
+ */
+ mutex_lock(&fp->ftp_mtx);
+ mutex_unlock(&fp->ftp_mtx);
+
+ if (dtrace_unregister(fp->ftp_provid) != 0) {
+ fail = 1;
+ fpp = &fp->ftp_next;
+ } else {
+ *fpp = fp->ftp_next;
+ fasttrap_provider_free(fp);
+ }
+ }
+
+ mutex_unlock(&bucket->ftb_mtx);
+ }
+
+#ifdef FIXME
+ if (fail) {
+ uint_t work;
+
+ /*
+ * If we're failing to detach, we need to unblock timeouts
+ * and start a new timeout if any work has accumulated while
+ * we've been unsuccessfully trying to detach.
+ */
+ mutex_lock(&fasttrap_cleanup_mtx);
+ fasttrap_cleanup_state = CLEANUP_NONE;
+ work = fasttrap_cleanup_work;
+ mutex_unlock(&fasttrap_cleanup_mtx);
+
+ if (work)
+ fasttrap_pid_cleanup();
+
+ dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
+ &fasttrap_meta_id);
+
+ return (DDI_FAILURE);
+ }
+#else
+ /*
+ * If we fail to detach, we are in lots of trouble because Linux does
+ * not allow us to fail on a module unload. We try to continue anyway
+ * since triggering an OOPS at this point is even worse.
+ */
+ if (fail)
+ pr_err("Unrecoverable error removing the fasttrap provider\n");
+#endif
+
+#ifdef DEBUG
+ mutex_lock(&fasttrap_count_mtx);
+ ASSERT(fasttrap_pid_count == 0);
+ mutex_unlock(&fasttrap_count_mtx);
+#endif
+
+ kfree(fasttrap_tpoints.fth_table);
+ fasttrap_tpoints.fth_nent = 0;
+
+ kfree(fasttrap_provs.fth_table);
+ fasttrap_provs.fth_nent = 0;
+
+ kfree(fasttrap_procs.fth_table);
+ fasttrap_procs.fth_nent = 0;
+
+ /*
+ * We know there are no tracepoints in any process anywhere in
+ * the system so there is no process which has its p_dtrace_count
+ * greater than zero, therefore we know that no thread can actively
+ * be executing code in fasttrap_fork(). Similarly for p_dtrace_probes
+ * and fasttrap_exec() and fasttrap_exit().
+ */
+#ifdef FIXME
+ ASSERT(dtrace_fasttrap_fork_ptr == &fasttrap_fork);
+ dtrace_fasttrap_fork_ptr = NULL;
+
+ ASSERT(dtrace_fasttrap_exec_ptr == &fasttrap_exec_exit);
+ dtrace_fasttrap_exec_ptr = NULL;
+
+ ASSERT(dtrace_fasttrap_exit_ptr == &fasttrap_exec_exit);
+ dtrace_fasttrap_exit_ptr = NULL;
+#endif
+
misc_deregister(&fasttrap_dev);
}
--- /dev/null
+#ifndef _FASTTRAP_IMPL_H_
+#define _FASTTRAP_IMPL_H_
+
+#include <linux/fasttrap.h>
+
+/*
+ * Fasttrap Providers, Probes and Tracepoints
+ *
+ * Each user process can have multiple providers -- the pid provider as
+ * well as any number of user-level statically defined tracing (USDT)
+ * providers. Those providers are each represented by a fasttrap_provider_t.
+ * All providers for a given process have a pointer to a shared
+ * fasttrap_proc_t. The fasttrap_proc_t has two states: active or defunct.
+ * When the count of active providers goes to zero it becomes defunct; a
+ * provider drops its active count when it is removed individually or as part
+ * of a mass removal when a process exits or performs an exec.
+ *
+ * Each probe is represented by a fasttrap_probe_t which has a pointer to
+ * its associated provider as well as a list of fasttrap_id_tp_t structures
+ * which are tuples combining a fasttrap_id_t and a fasttrap_tracepoint_t.
+ * A fasttrap_tracepoint_t represents the actual point of instrumentation
+ * and it contains two lists of fasttrap_id_t structures (to be fired pre-
+ * and post-instruction emulation) that identify the probes attached to the
+ * tracepoint. Tracepoints also have a pointer to the fasttrap_proc_t for the
+ * process they trace which is used when looking up a tracepoint both when a
+ * probe fires and when enabling and disabling probes.
+ *
+ * It's important to note that probes are preallocated with the necessary
+ * number of tracepoints, but that tracepoints can be shared by probes and
+ * swapped between probes. If a probe's preallocated tracepoint is enabled
+ * (and, therefore, the associated probe is enabled), and that probe is
+ * then disabled, ownership of that tracepoint may be exchanged for an
+ * unused tracepoint belonging to another probe that was attached to the
+ * enabled tracepoint.
+ */
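+
+/*
+ * A rough sketch of the relationships described above:
+ *
+ * fasttrap_probe_t --> fasttrap_provider_t --> fasttrap_proc_t
+ * fasttrap_probe_t --> ftp_tps[]: (fasttrap_id_t, tracepoint) pairs
+ * fasttrap_tracepoint_t --> ftt_ids / ftt_retids: lists of ids to
+ * fire pre- and post-instruction emulation
+ */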
+typedef struct fasttrap_proc {
+ pid_t ftpc_pid; /* process ID for this proc */
+ atomic64_t ftpc_acount; /* count of active providers */
+ uint64_t ftpc_rcount; /* count of extant providers */
+ struct mutex ftpc_mtx; /* lock on all but acount */
+ struct fasttrap_proc *ftpc_next; /* next proc in hash chain */
+} fasttrap_proc_t;
+
+typedef struct fasttrap_provider {
+ pid_t ftp_pid; /* process ID for this prov */
+ char ftp_name[DTRACE_PROVNAMELEN]; /* prov name (w/o the pid) */
+ dtrace_provider_id_t ftp_provid; /* DTrace provider handle */
+ uint_t ftp_marked; /* mark for possible removal */
+ uint_t ftp_retired; /* mark when retired */
+ struct mutex ftp_mtx; /* provider lock */
+ struct mutex ftp_cmtx; /* lock on creating probes */
+ uint64_t ftp_rcount; /* enabled probes ref count */
+ uint64_t ftp_ccount; /* consumers creating probes */
+ uint64_t ftp_mcount; /* meta provider count */
+ fasttrap_proc_t *ftp_proc; /* shared proc for all provs */
+ struct fasttrap_provider *ftp_next; /* next prov in hash chain */
+} fasttrap_provider_t;
+
+typedef struct fasttrap_id fasttrap_id_t;
+typedef struct fasttrap_probe fasttrap_probe_t;
+typedef struct fasttrap_tracepoint fasttrap_tracepoint_t;
+
+struct fasttrap_id {
+ fasttrap_probe_t *fti_probe; /* referring probe */
+ fasttrap_id_t *fti_next; /* enabled probe list on tp */
+ fasttrap_probe_type_t fti_ptype; /* probe type */
+};
+
+typedef unsigned long fasttrap_machtp_t; /* FIXME */
+
+struct fasttrap_tracepoint {
+ fasttrap_proc_t *ftt_proc; /* associated process struct */
+ uintptr_t ftt_pc; /* address of tracepoint */
+ pid_t ftt_pid; /* pid of tracepoint */
+ fasttrap_machtp_t ftt_mtp; /* ISA-specific portion */
+ fasttrap_id_t *ftt_ids; /* NULL-terminated list */
+ fasttrap_id_t *ftt_retids; /* NULL-terminated list */
+ fasttrap_tracepoint_t *ftt_next; /* link in global hash */
+};
+
+typedef struct fasttrap_id_tp {
+ fasttrap_id_t fit_id;
+ fasttrap_tracepoint_t *fit_tp;
+} fasttrap_id_tp_t;
+
+struct fasttrap_probe {
+ dtrace_id_t ftp_id; /* DTrace probe identifier */
+ pid_t ftp_pid; /* pid for this probe */
+ fasttrap_provider_t *ftp_prov; /* this probe's provider */
+ uintptr_t ftp_faddr; /* associated function's addr */
+ size_t ftp_fsize; /* associated function's size */
+ uint64_t ftp_gen; /* modification generation */
+ uint64_t ftp_ntps; /* number of tracepoints */
+ uint8_t *ftp_argmap; /* native to translated args */
+ uint8_t ftp_nargs; /* translated argument count */
+ uint8_t ftp_enabled; /* is this probe enabled */
+ char *ftp_xtypes; /* translated types index */
+ char *ftp_ntypes; /* native types index */
+ fasttrap_id_tp_t ftp_tps[1]; /* flexible array */
+};
+
+typedef struct fasttrap_bucket {
+ struct mutex ftb_mtx; /* bucket lock */
+ void *ftb_data; /* data payload */
+
+ uint8_t ftb_pad[64 - sizeof(struct mutex) - sizeof(void *)];
+} fasttrap_bucket_t;
+
+typedef struct fasttrap_hash {
+ ulong_t fth_nent; /* power-of-2 num. of entries */
+ ulong_t fth_mask; /* fth_nent - 1 */
+ fasttrap_bucket_t *fth_table; /* array of buckets */
+} fasttrap_hash_t;
+
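+/*
+ * Recover the index of an id within its probe's ftp_tps[] array: step back
+ * from the embedded fasttrap_id_t to the enclosing fasttrap_id_tp_t, then
+ * subtract the base of the array. For the id embedded in
+ * pp->ftp_tps[3].fit_id, for example, this evaluates to 3.
+ */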
+#define FASTTRAP_ID_INDEX(id) \
+ ((fasttrap_id_tp_t *)(((char *)(id) - \
+ offsetof(fasttrap_id_tp_t, fit_id))) - \
+ &(id)->fti_probe->ftp_tps[0])
+
+#define FASTTRAP_OFFSET_AFRAMES 3
+
+extern void fasttrap_meta_create_probe(void *, void *,
+ dtrace_helper_probedesc_t *);
+extern void *fasttrap_meta_provide(void *, dtrace_helper_provdesc_t *, pid_t);
+extern void fasttrap_meta_remove(void *, dtrace_helper_provdesc_t *, pid_t);
+
+extern dtrace_meta_provider_id_t fasttrap_id;
+
+extern int fasttrap_dev_init(void);
+extern void fasttrap_dev_exit(void);
+
+#endif /* _FASTTRAP_IMPL_H_ */
*
* CDDL HEADER END
*
- * Copyright 2010, 2011 Oracle, Inc. All rights reserved.
+ * Copyright 2010, 2011, 2012, 2013 Oracle, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "dtrace.h"
#include "dtrace_dev.h"
-#include "fasttrap.h"
+#include "fasttrap_impl.h"
MODULE_AUTHOR("Kris Van Hees (kris.van.hees@oracle.com)");
MODULE_DESCRIPTION("Fasttrap Tracing");
MODULE_VERSION("v0.1");
MODULE_LICENSE("CDDL");
-static const dtrace_pattr_t fasttrap_attr = {
+static dtrace_mops_t fasttrap_mops = {
+ fasttrap_meta_create_probe,
+ fasttrap_meta_provide,
+ fasttrap_meta_remove
};
-static dtrace_pops_t fasttrap_pops = {
-};
-
-DT_PROVIDER_MODULE(fasttrap, DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER)
+DT_META_PROVIDER_MODULE(fasttrap)