/* Byte offset of counter n's register block: each counter occupies 0x28 bytes
 * starting at DMC620_PMU_COUNTERS_BASE. Fully parenthesized; safe for any
 * expression argument.
 */
#define DMC620_PMU_COUNTERn_OFFSET(n) \
        (DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
 
-static LIST_HEAD(dmc620_pmu_irqs);
+/*
+ * dmc620_pmu_irqs_lock: protects dmc620_pmu_irqs list
+ * dmc620_pmu_node_lock: protects pmus_node lists in all dmc620_pmu instances
+ */
 static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
+static DEFINE_MUTEX(dmc620_pmu_node_lock);
+static LIST_HEAD(dmc620_pmu_irqs);
 
 struct dmc620_pmu_irq {
        struct hlist_node node;
                return PTR_ERR(irq);
 
        dmc620_pmu->irq = irq;
-       mutex_lock(&dmc620_pmu_irqs_lock);
+       mutex_lock(&dmc620_pmu_node_lock);
        list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
-       mutex_unlock(&dmc620_pmu_irqs_lock);
+       mutex_unlock(&dmc620_pmu_node_lock);
 
        return 0;
 }
 {
        struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
 
-       mutex_lock(&dmc620_pmu_irqs_lock);
+       mutex_lock(&dmc620_pmu_node_lock);
        list_del_rcu(&dmc620_pmu->pmus_node);
+       mutex_unlock(&dmc620_pmu_node_lock);
 
+       mutex_lock(&dmc620_pmu_irqs_lock);
        if (!refcount_dec_and_test(&irq->refcount)) {
                mutex_unlock(&dmc620_pmu_irqs_lock);
                return;
                return 0;
 
        /* We're only reading, but this isn't the place to be involving RCU */
-       mutex_lock(&dmc620_pmu_irqs_lock);
+       mutex_lock(&dmc620_pmu_node_lock);
        list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
                perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
-       mutex_unlock(&dmc620_pmu_irqs_lock);
+       mutex_unlock(&dmc620_pmu_node_lock);
 
        WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target)));
        irq->cpu = target;