 static struct imc_pmu_ref *trace_imc_refc;
 static int trace_imc_mem_size;
 
+/*
+ * Global structure used to avoid races between thread, core and
+ * trace-imc events: .id records the IMC domain currently in use
+ * (core/thread/trace) and .refc counts the events using it.
+ */
+static struct imc_pmu_ref imc_global_refc = {
+       .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+       .id = 0,
+       .refc = 0,
+};
+
 static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
 {
        return container_of(event->pmu, struct imc_pmu, pmu);
                        return -EINVAL;
 
                ref->refc = 0;
+               /*
+                * Reduce the global reference count if this is the
+                * last cpu in this core and a core-imc event is
+                * running on this cpu.
+                */
+               mutex_lock(&imc_global_refc.lock);
+               if (imc_global_refc.id == IMC_DOMAIN_CORE)
+                       imc_global_refc.refc--;
+
+               mutex_unlock(&imc_global_refc.lock);
        }
        return 0;
 }
                                 ppc_core_imc_cpu_offline);
 }
 
+static void reset_global_refc(struct perf_event *event)
+{
+       mutex_lock(&imc_global_refc.lock);
+       imc_global_refc.refc--;
+
+       /*
+        * If no other event is running for this domain
+        * (thread/core/trace), reset the global id to zero.
+        */
+       if (imc_global_refc.refc <= 0) {
+               imc_global_refc.refc = 0;
+               imc_global_refc.id = 0;
+       }
+       mutex_unlock(&imc_global_refc.lock);
+}
+
 static void core_imc_counters_release(struct perf_event *event)
 {
        int rc, core_id;
                ref->refc = 0;
        }
        mutex_unlock(&ref->lock);
+
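+       /* Also drop this event's reference on the global id/refcount */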
+       reset_global_refc(event);
 }
 
 static int core_imc_event_init(struct perf_event *event)
        ++ref->refc;
        mutex_unlock(&ref->lock);
 
+       /*
+        * Since the system can run IMC in either accumulation or
+        * trace mode at a time, core-imc events are allowed only if
+        * no other trace/thread imc events are enabled/monitored.
+        *
+        * Take the global lock and check imc_global_refc.id to know
+        * whether any other trace/thread imc events are running.
+        */
+       mutex_lock(&imc_global_refc.lock);
+       if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
+               /*
+                * No other trace/thread imc events are running in
+                * the system, so set imc_global_refc.id to core-imc.
+                */
+               imc_global_refc.id = IMC_DOMAIN_CORE;
+               imc_global_refc.refc++;
+       } else {
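+               /* A thread/trace imc event is already running */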
+               mutex_unlock(&imc_global_refc.lock);
+               return -EBUSY;
+       }
+       mutex_unlock(&imc_global_refc.lock);
+
        event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
        event->destroy = core_imc_counters_release;
        return 0;
 
 static int ppc_thread_imc_cpu_offline(unsigned int cpu)
 {
-       mtspr(SPRN_LDBAR, 0);
+       /*
+        * Clear bit 0 of LDBAR.
+        *
+        * If bit 0 of LDBAR is unset, the counter data is no longer
+        * posted to memory. For thread-imc, bit 0 of LDBAR is set to 1
+        * in the event_add function, so clear it here to stop the
+        * updates to memory in the cpu_offline path.
+        *
+        * Note that "bit 0" uses IBM (MSB-first) bit numbering, which
+        * is bit 63 in LSB-0 terms, hence the (1UL << 63) mask below.
+        */
+       mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
+       /* Reduce the refc if a thread-imc event is running on this cpu */
+       mutex_lock(&imc_global_refc.lock);
+       if (imc_global_refc.id == IMC_DOMAIN_THREAD)
+               imc_global_refc.refc--;
+       mutex_unlock(&imc_global_refc.lock);
+
        return 0;
 }
 
        if (!target)
                return -EINVAL;
 
+       mutex_lock(&imc_global_refc.lock);
+       /*
+        * Check if any other trace/core imc events are running in the
+        * system; if not, set the global id to thread-imc.
+        */
+       if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
+               imc_global_refc.id = IMC_DOMAIN_THREAD;
+               imc_global_refc.refc++;
+       } else {
+               mutex_unlock(&imc_global_refc.lock);
+               return -EBUSY;
+       }
+       mutex_unlock(&imc_global_refc.lock);
+
        event->pmu->task_ctx_nr = perf_sw_context;
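+       /* Drop the global reference when the event is destroyed */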
+       event->destroy = reset_global_refc;
        return 0;
 }
 
        int core_id;
        struct imc_pmu_ref *ref;
 
-       mtspr(SPRN_LDBAR, 0);
-
        core_id = smp_processor_id() / threads_per_core;
        ref = &core_imc_refc[core_id];
+       if (!ref) {
+               pr_debug("imc: Failed to get event reference count\n");
+               return;
+       }
 
        mutex_lock(&ref->lock);
        ref->refc--;
                ref->refc = 0;
        }
        mutex_unlock(&ref->lock);
+
+       /* Clear bit 0 of LDBAR to stop posting updates to memory */
+       mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
        /*
         * Take a snapshot and calculate the delta and update
         * the event counter values.
 
 static int ppc_trace_imc_cpu_offline(unsigned int cpu)
 {
-       mtspr(SPRN_LDBAR, 0);
+       /*
+        * No need to clear bit 0 of LDBAR here, as it is already
+        * zero in imc trace-mode.
+        *
+        * Reduce the refc if any trace-imc event is running on
+        * this cpu.
+        */
+       mutex_lock(&imc_global_refc.lock);
+       if (imc_global_refc.id == IMC_DOMAIN_TRACE)
+               imc_global_refc.refc--;
+       mutex_unlock(&imc_global_refc.lock);
+
        return 0;
 }
 
        local_mem = get_trace_imc_event_base_addr();
        ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;
 
-       if (core_imc_refc)
-               ref = &core_imc_refc[core_id];
+       /* trace-imc reference count */
+       if (trace_imc_refc)
+               ref = &trace_imc_refc[core_id];
        if (!ref) {
-               /* If core-imc is not enabled, use trace-imc reference count */
-               if (trace_imc_refc)
-                       ref = &trace_imc_refc[core_id];
-               if (!ref)
-                       return -EINVAL;
+               pr_debug("imc: Failed to get the event reference count\n");
+               return -EINVAL;
        }
+
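+       /* Point LDBAR at the trace buffer and enable trace-imc mode */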
        mtspr(SPRN_LDBAR, ldbar_value);
        mutex_lock(&ref->lock);
        if (ref->refc == 0) {
                                get_hard_smp_processor_id(smp_processor_id()))) {
                        mutex_unlock(&ref->lock);
                        pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
-                       mtspr(SPRN_LDBAR, 0);
                        return -EINVAL;
                }
        }
        ++ref->refc;
        mutex_unlock(&ref->lock);
-
        return 0;
 }
 
        int core_id = smp_processor_id() / threads_per_core;
        struct imc_pmu_ref *ref = NULL;
 
-       if (core_imc_refc)
-               ref = &core_imc_refc[core_id];
+       if (trace_imc_refc)
+               ref = &trace_imc_refc[core_id];
        if (!ref) {
-               /* If core-imc is not enabled, use trace-imc reference count */
-               if (trace_imc_refc)
-                       ref = &trace_imc_refc[core_id];
-               if (!ref)
-                       return;
+               pr_debug("imc: Failed to get event reference count\n");
+               return;
        }
-       mtspr(SPRN_LDBAR, 0);
+
        mutex_lock(&ref->lock);
        ref->refc--;
        if (ref->refc == 0) {
                ref->refc = 0;
        }
        mutex_unlock(&ref->lock);
+
        trace_imc_event_stop(event, flags);
 }
 
        if (event->attr.sample_period == 0)
                return -ENOENT;
 
+       /*
+        * Take the global lock and make sure no other thread is
+        * running any core/thread imc events.
+        */
+       mutex_lock(&imc_global_refc.lock);
+       if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
+               /*
+                * No core/thread imc events are running in the
+                * system, so set imc_global_refc.id to trace-imc.
+                */
+               imc_global_refc.id = IMC_DOMAIN_TRACE;
+               imc_global_refc.refc++;
+       } else {
+               mutex_unlock(&imc_global_refc.lock);
+               return -EBUSY;
+       }
+       mutex_unlock(&imc_global_refc.lock);
+
        event->hw.idx = -1;
        target = event->hw.target;
 
        event->pmu->task_ctx_nr = perf_hw_context;
+       event->destroy = reset_global_refc;
        return 0;
 }
 
 static void thread_imc_ldbar_disable(void *dummy)
 {
        /*
-        * By Zeroing LDBAR, we disable thread-imc
-        * updates.
+        * By clearing bit 0 of LDBAR, we disable thread-imc
+        * updates to memory.
         */
-       mtspr(SPRN_LDBAR, 0);
+       mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
 }
 
 void thread_imc_disable(void)