struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        unsigned long flags;
 
-       pm_runtime_get_sync(drvdata->dev);
-
        spin_lock_irqsave(&drvdata->spinlock, flags);
        etb_enable_hw(drvdata);
        drvdata->enable = true;
        drvdata->enable = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-       pm_runtime_put(drvdata->dev);
-
        dev_info(drvdata->dev, "ETB disabled\n");
 }
 
 
        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        int ret;
 
-       pm_runtime_get_sync(csdev->dev.parent);
        spin_lock(&drvdata->spinlock);
 
        /*
        return 0;
 err:
        spin_unlock(&drvdata->spinlock);
-       pm_runtime_put(csdev->dev.parent);
        return ret;
 }
 
 
        spin_unlock(&drvdata->spinlock);
        put_online_cpus();
-       pm_runtime_put(csdev->dev.parent);
 
        dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
 
        if (!drvdata->enable)
                return drvdata->trcid;
 
-       pm_runtime_get_sync(drvdata->dev);
        spin_lock_irqsave(&drvdata->spinlock, flags);
 
        CS_UNLOCK(drvdata->base);
        CS_LOCK(drvdata->base);
 
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(drvdata->dev);
 
        return trace_id;
 }
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        int ret;
 
-       pm_runtime_get_sync(drvdata->dev);
        spin_lock(&drvdata->spinlock);
 
        /*
        return 0;
 err:
        spin_unlock(&drvdata->spinlock);
-       pm_runtime_put(drvdata->dev);
        return ret;
 }
 
        spin_unlock(&drvdata->spinlock);
        put_online_cpus();
 
-       pm_runtime_put(drvdata->dev);
-
        dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
 
 
 {
        struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
        funnel_enable_hw(drvdata, inport);
 
        dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
        struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
        funnel_disable_hw(drvdata, inport);
-       pm_runtime_put(drvdata->dev);
 
        dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
 }
 
 {
        struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
-
        CS_UNLOCK(drvdata->base);
 
        /*
 
        CS_LOCK(drvdata->base);
 
-       pm_runtime_put(drvdata->dev);
-
        dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
 
 {
        struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
        dev_info(drvdata->dev, "REPLICATOR enabled\n");
        return 0;
 }
 {
        struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_put(drvdata->dev);
        dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
 
 {
        unsigned long flags;
 
-       pm_runtime_get_sync(drvdata->dev);
-
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
-               pm_runtime_put(drvdata->dev);
                return -EBUSY;
        }
 
        drvdata->enable = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-       pm_runtime_put(drvdata->dev);
-
        dev_info(drvdata->dev, "TMC disabled\n");
 }
 
 
 {
        struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(csdev->dev.parent);
        tpiu_enable_hw(drvdata);
 
        dev_info(drvdata->dev, "TPIU enabled\n");
        struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
        tpiu_disable_hw(drvdata);
-       pm_runtime_put(csdev->dev.parent);
 
        dev_info(drvdata->dev, "TPIU disabled\n");
 }
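
With the per-driver calls removed, the enable/disable hooks above are left doing only hardware programming. As a sketch of the result, the TPIU pair would read roughly as follows; the function signatures and the previously elided lines are reconstructed for illustration, not taken from this diff:

static int tpiu_enable(struct coresight_device *csdev)
{
        struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /* No pm_runtime_get_sync() here any more: the core framework
         * takes the reference on csdev->dev.parent when the path is built. */
        tpiu_enable_hw(drvdata);

        dev_info(drvdata->dev, "TPIU enabled\n");
        return 0;
}

static void tpiu_disable(struct coresight_device *csdev)
{
        struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        tpiu_disable_hw(drvdata);
        /* Likewise, the matching pm_runtime_put() now happens when the
         * path is released by the core framework. */

        dev_info(drvdata->dev, "TPIU disabled\n");
}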
 
 #include <linux/coresight.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
+#include <linux/pm_runtime.h>
 
 #include "coresight-priv.h"
 
        /*
         * A path from this element to a sink has been found.  The elements
         * leading to the sink are already enqueued, all that is left to do
-        * is add a node for this element.
+        * is tell the PM runtime core we need this element and add a node
+        * for it.
         */
        node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL);
        if (!node)
 
        node->csdev = csdev;
        list_add(&node->link, path);
+       pm_runtime_get_sync(csdev->dev.parent);
 
        return 0;
 }
  */
 void coresight_release_path(struct list_head *path)
 {
+       struct coresight_device *csdev;
        struct coresight_node *nd, *next;
 
        list_for_each_entry_safe(nd, next, path, link) {
+               csdev = nd->csdev;
+
+               pm_runtime_put_sync(csdev->dev.parent);
                list_del(&nd->link);
                kfree(nd);
        }