/*
* Get an ID for the aggregation (add it to the idr).
*/
- mutex_unlock(&dtrace_lock);
-
idr_preload(GFP_KERNEL);
- mutex_lock(&dtrace_lock);
-
aggid = idr_alloc_cyclic(&state->dts_agg_idr, agg, 0, 0, GFP_NOWAIT);
idr_preload_end();
if (aggid < 0) {
probe = kmem_cache_alloc(dtrace_probe_cachep, __GFP_NOFAIL);
/*
- * The ir_preload() function should be called without holding locks.
+ * Ideally, idr_preload() would be called without holding any locks,
+ * since it may block.  At the same time, dtrace_lock is required to
+ * protect the DTrace structures.  We cannot drop it before
+ * idr_preload() and re-acquire it afterwards, because we may not
+ * sleep in atomic context (which we are in until idr_preload_end()).
+ *
+ * It is better to delay the DTrace framework than the traced host, so
+ * the lock is kept held for the duration of the IDR allocation.
+ *
* When the provider is the DTrace core itself, dtrace_lock will be
* held when we enter this function.
*/
if (provider == dtrace_provider) {
ASSERT(MUTEX_HELD(&dtrace_lock));
- mutex_unlock(&dtrace_lock);
+ } else {
+ mutex_lock(&dtrace_lock);
}
idr_preload(GFP_KERNEL);
-
- mutex_lock(&dtrace_lock);
id = idr_alloc_cyclic(&dtrace_probe_idr, probe, 0, 0, GFP_NOWAIT);
idr_preload_end();
if (id < 0) {
- * We need to drop our locks when calling idr_preload(), so we try to
- * get them back right after.
+ * We keep our locks held across idr_preload(): re-acquiring them
+ * between idr_preload() and idr_alloc_cyclic() is not possible, since
+ * no sleeping is allowed until idr_preload_end().
*/
- mutex_unlock(&dtrace_lock);
- mutex_unlock(&dtrace_provider_lock);
- mutex_unlock(&cpu_lock);
-
idr_preload(GFP_KERNEL);
-
- mutex_lock(&cpu_lock);
- mutex_lock(&dtrace_provider_lock);
- mutex_lock(&dtrace_lock);
-
id = idr_alloc_cyclic(&dtrace_probe_idr, NULL, 0, 0, GFP_NOWAIT);
idr_preload_end();
 * Create a first entry in the aggregation IDR, so that ID 0 gets used
 * as meaning 'none'.
*/
- mutex_unlock(&dtrace_lock);
- mutex_unlock(&cpu_lock);
-
idr_preload(GFP_KERNEL);
-
- mutex_lock(&cpu_lock);
- mutex_lock(&dtrace_lock);
-
aggid = idr_alloc_cyclic(&state->dts_agg_idr, NULL, 0, 0, GFP_NOWAIT);
idr_preload_end();
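
For reference, here is a minimal, self-contained sketch (not part of the
patch) of the allocation pattern the hunks above converge on: the mutex
stays held across the whole idr_preload() / idr_alloc_cyclic() /
idr_preload_end() sequence, and the allocation itself uses GFP_NOWAIT
because no sleeping is allowed until idr_preload_end().  The names
example_idr, example_lock and example_alloc_id() are illustrative only
and do not appear in the DTrace sources.

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);
static DEFINE_MUTEX(example_lock);

/* Allocate a cyclic ID for ptr while example_lock stays held throughout. */
static int example_alloc_id(void *ptr)
{
	int id;

	mutex_lock(&example_lock);

	/*
	 * idr_preload() may block; blocking while holding a mutex is fine.
	 * From here until idr_preload_end() we must not sleep, so the
	 * allocation itself uses GFP_NOWAIT and falls back on the
	 * preloaded per-CPU reserve if needed.
	 */
	idr_preload(GFP_KERNEL);
	id = idr_alloc_cyclic(&example_idr, ptr, 0, 0, GFP_NOWAIT);
	idr_preload_end();

	mutex_unlock(&example_lock);

	return id;	/* >= 0 on success, negative errno on failure */
}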