Solaris MUTEX_HELD() macro. The former (mutex_is_locked()) merely tested whether
the mutex was locked by anyone, whereas the real test needed here is whether the
mutex is held by the current thread.
Signed-off-by: Kris Van Hees <kris.van.hees@oracle.com>
#include <linux/idr.h>
#include <linux/ktime.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/stringify.h>
#include <linux/types.h>
} while (0)
#endif
+#define MUTEX_HELD(lock) mutex_owned(lock)
+
#endif /* _DTRACE_H_ */
{
dtrace_state_t *state;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
if ((state = dtrace_anon.dta_state) == NULL) {
ASSERT(dtrace_anon.dta_enabling == NULL);
dof_hdr_t *dof;
char c[32]; /* enough for "dof-data-" + digits */
- ASSERT(mutex_is_locked(&dtrace_lock));
- ASSERT(mutex_is_locked(&cpu_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&cpu_lock));
for (i = 0; ; i++) {
snprintf(c, sizeof (c), "dof-data-%d", i);
processorid_t cpu;
dtrace_buffer_t *buf;
- ASSERT(mutex_is_locked(&dtrace_lock));
- ASSERT(mutex_is_locked(&cpu_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&cpu_lock));
#ifdef FIXME
if (size > dtrace_nonroot_maxsize &&
void dtrace_buffer_polish(dtrace_buffer_t *buf)
{
ASSERT(buf->dtb_flags & DTRACEBUF_RING);
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
return;
int i, oldsvars, osz, nsz, otlocals, ntlocals;
uint_t id;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
for (i = 0; i < dp->dtdo_varlen; i++) {
{
int i;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(dp->dtdo_refcnt != 0);
for (i = 0; i < dp->dtdo_varlen; i++) {
}
m.mx = dtrace_load64(tupregs[0].dttk_value);
-#ifdef CONFIG_SMP
- regs[rd] = m.mi.owner != NULL;
-#else
- regs[rd] = mutex_is_locked(&m.mi);
-#endif
+ regs[rd] = mutex_owned(&m.mi);
break;
case DIF_SUBR_MUTEX_OWNER:
roundup(sizeof(dof_sec_t), sizeof(uint64_t)) +
sizeof(dof_optdesc_t) * DTRACEOPT_MAX;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
dof = kmalloc(len, GFP_KERNEL);
dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
{
dof_hdr_t hdr, *dof;
-#ifdef FIXME
- /* This seems to be unnecessary and actually wrong). */
- ASSERT(!mutex_is_locked(&dtrace_lock));
-#endif
+ ASSERT(!MUTEX_HELD(&dtrace_lock));
/*
* First, we're going to copyin() the sizeof(dof_hdr_t).
dtrace_enabling_t *enab;
uint_t i;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(dof->dofh_loadsz >= sizeof(dof_hdr_t));
/*
dtrace_optval_t *opt = state->dts_options, nframes, strsize;
uint64_t arg = desc->dtad_arg;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
if (DTRACEACT_ISAGG(desc->dtad_kind)) {
dtrace_ecb_t *pecb, *prev = NULL;
dtrace_probe_t *probe = ecb->dte_probe;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
if (probe == NULL)
return;
dtrace_ecb_t *ecb;
dtrace_epid_t epid;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ecb = kzalloc(sizeof(dtrace_ecb_t), GFP_KERNEL);
ecb->dte_predicate = NULL;
dtrace_provider_t *prov;
dtrace_ecbdesc_t *desc = enab->dten_current;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(state != NULL);
ecb = dtrace_ecb_add(state, probe);
dtrace_predicate_t *pred;
dtrace_epid_t epid = ecb->dte_epid;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(ecb->dte_next == NULL);
ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
{
dtrace_probe_t *probe = ecb->dte_probe;
- ASSERT(mutex_is_locked(&cpu_lock));
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&cpu_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(ecb->dte_next == NULL);
if (probe == NULL)
{
dtrace_ecb_t *ecb;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
if (id == 0 || id > state->dts_necbs)
return NULL;
dtrace_aggregation_t *dtrace_aggid2agg(dtrace_state_t *state,
dtrace_aggid_t id)
{
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
return idr_find(&state->dts_agg_idr, id);
}
dtrace_ecbdesc_t *ep;
dtrace_vstate_t *vstate = enab->dten_vstate;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
for (i = 0; i < enab->dten_ndesc; i++) {
dtrace_actdesc_t *act, *next;
{
dtrace_state_t *state;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
ASSERT(enab->dten_vstate != NULL);
dtrace_enabling_t *new, *enab;
int found = 0, err = -ENOENT;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
{
dtrace_enabling_t *enab, *next;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
/*
* Iterate over all retained enablings, destroy the enablings retained
{
dtrace_predicate_t *pred;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(dp->dtdo_refcnt != 0);
pred = kzalloc(sizeof (dtrace_predicate_t), GFP_KERNEL);
void dtrace_predicate_hold(dtrace_predicate_t *pred)
{
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
ASSERT(pred->dtp_refcnt > 0);
{
dtrace_difo_t *dp = pred->dtp_difo;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
ASSERT(pred->dtp_refcnt > 0);
* held when we enter this function.
*/
if (provider == dtrace_provider) {
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
mutex_unlock(&dtrace_lock);
}
dtrace_id_t id;
int match;
- /* FIXME: Maybe? We really should protect against this. */
- if (mutex_is_locked(&dtrace_lock)) {
- WARN(1, "dtrace_probe_lookup() called with dtrace_lock held!");
- return 0;
- }
-
pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
pkey.dtpk_pmatch = &dtrace_match_string;
pkey.dtpk_mod = mod;
*idp = (dtrace_provider_id_t)provider;
if (pops == &dtrace_provider_ops) {
- ASSERT(mutex_is_locked(&dtrace_provider_lock));
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_provider_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(dtrace_anon.dta_enabling == NULL);
/*
* with locks already held.
*/
ASSERT(old == dtrace_provider);
- ASSERT(mutex_is_locked(&dtrace_provider_lock));
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_provider_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
self = 1;
dtrace_dynvar_t *dvar, *next, *start;
int i;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
memset(dstate, 0, sizeof (dtrace_dstate_t));
void dtrace_dstate_fini(dtrace_dstate_t *dstate)
{
- ASSERT(mutex_is_locked(&cpu_lock));
+ ASSERT(MUTEX_HELD(&cpu_lock));
if (dstate->dtds_base == NULL)
return;
int err;
dtrace_aggid_t aggid;
- ASSERT(mutex_is_locked(&dtrace_lock));
- ASSERT(mutex_is_locked(&cpu_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&cpu_lock));
state = kzalloc(sizeof (dtrace_state_t), GFP_KERNEL);
state->dts_epid = DTRACE_EPIDNONE + 1;
processorid_t cpu = DTRACE_CPUALL;
int flags = 0, rval;
- ASSERT(mutex_is_locked(&dtrace_lock));
- ASSERT(mutex_is_locked(&cpu_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&cpu_lock));
ASSERT(which < DTRACEOPT_MAX);
ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
(state == dtrace_anon.dta_state &&
{
dtrace_icookie_t cookie;
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
state->dts_activity != DTRACE_ACTIVITY_DRAINING)
int dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
dtrace_optval_t val)
{
- ASSERT(mutex_is_locked(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
return -EBUSY;
int nspec = state->dts_nspeculations;
uint32_t match;
- ASSERT(mutex_is_locked(&dtrace_lock));
- ASSERT(mutex_is_locked(&cpu_lock));
+ ASSERT(MUTEX_HELD(&dtrace_lock));
+ ASSERT(MUTEX_HELD(&cpu_lock));
/*
* First, retract any retained enablings for this state.
cyc_time_t when;
ASSERT(ktime_nz(prof->prof_interval));
- ASSERT(mutex_is_locked(&cpu_lock));
+ ASSERT(MUTEX_HELD(&cpu_lock));
if (prof->prof_kind == PROF_TICK) {
hdlr.cyh_func = profile_tick;
profile_probe_t *prof = parg;
ASSERT(prof->prof_cyclic != CYCLIC_NONE);
- ASSERT(mutex_is_locked(&cpu_lock));
+ ASSERT(MUTEX_HELD(&cpu_lock));
cyclic_remove(prof->prof_cyclic);
prof->prof_cyclic = CYCLIC_NONE;