If device_prepare() runs on a device for which runtime PM has never
been enabled, it may reasonably assume that runtime PM will not be
enabled for that device during the system suspend-resume cycle
currently in progress, but this has never been guaranteed.

To verify this assumption, make device_prepare() arrange for a
device warning, accompanied by a call trace dump, to be triggered if
runtime PM is enabled for such a device after device_prepare() has
returned.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://patch.msgid.link/6131109.lOV4Wx5bFT@rjwysocki.net
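
For illustration, here is a minimal sketch of the kind of driver behavior the
new warning catches; the foo_* names are hypothetical and not part of this
patch. The driver enables runtime PM from its system ->suspend() callback even
though runtime PM was still disabled, and had never been enabled, when
device_prepare() ran:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_suspend(struct device *dev)
{
	/*
	 * Runtime PM has never been enabled for this device, so the
	 * device_prepare() change below has set last_status to RPM_BLOCKED.
	 * Enabling runtime PM now, in the middle of the suspend-resume
	 * cycle, is exactly what the new check warns about.
	 */
	pm_runtime_enable(dev);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(foo_suspend, NULL)
};

With this patch applied, the pm_runtime_enable() call above emits "Attempt to
enable runtime PM when it is blocked" for the device and dumps the call trace
leading to the misplaced call.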
 
        device_unlock(dev);
 
 out:
+       /* If enabling runtime PM for the device is blocked, unblock it. */
+       pm_runtime_unblock(dev);
        pm_runtime_put(dev);
 }
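
Note that this call is unconditional in device_complete(): as the
implementation added further below shows, pm_runtime_unblock() only does
anything if last_status is actually RPM_BLOCKED, so it is safe for devices
that had runtime PM enabled all along and simply undoes the blocking set up
in device_prepare().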
 
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);
+       /*
+        * If runtime PM is disabled for the device at this point and it has
+        * never been enabled so far, it should not be enabled until this system
+        * suspend-resume cycle is complete, so prepare to trigger a warning on
+        * subsequent attempts to enable it.
+        */
+       pm_runtime_block_if_disabled(dev);
 
        if (dev->power.syscore)
                return 0;
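
Because pm_runtime_block_if_disabled() runs right after the usage-count
reference is taken and before any ->prepare() callback is invoked, the
warning also covers attempts to enable runtime PM from within the prepare
phase itself, not just from later suspend or resume callbacks.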
 
 }
 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
 
+void pm_runtime_block_if_disabled(struct device *dev)
+{
+       spin_lock_irq(&dev->power.lock);
+
+       if (dev->power.disable_depth && dev->power.last_status == RPM_INVALID)
+               dev->power.last_status = RPM_BLOCKED;
+
+       spin_unlock_irq(&dev->power.lock);
+}
+
+void pm_runtime_unblock(struct device *dev)
+{
+       spin_lock_irq(&dev->power.lock);
+
+       if (dev->power.last_status == RPM_BLOCKED)
+               dev->power.last_status = RPM_INVALID;
+
+       spin_unlock_irq(&dev->power.lock);
+}
+
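The resulting last_status transitions are easy to check in isolation. The
following is a small self-contained userspace model of the helpers involved
(mock types, illustrative names, locking omitted), not kernel code; it is a
sketch that compiles and runs as-is:

#include <stdio.h>

/* Mirror of the patched enum; RPM_BLOCKED is the new final state. */
enum rpm_status {
	RPM_INVALID = -1,
	RPM_ACTIVE = 0,
	RPM_RESUMING,
	RPM_SUSPENDED,
	RPM_SUSPENDING,
	RPM_BLOCKED,
};

/* Userspace stand-in for the relevant dev->power fields. */
struct mock_dev {
	int disable_depth;		/* > 0 means runtime PM is disabled */
	enum rpm_status last_status;
};

static void block_if_disabled(struct mock_dev *d)
{
	/* Block only devices that have never had runtime PM enabled. */
	if (d->disable_depth && d->last_status == RPM_INVALID)
		d->last_status = RPM_BLOCKED;
}

static void unblock(struct mock_dev *d)
{
	if (d->last_status == RPM_BLOCKED)
		d->last_status = RPM_INVALID;
}

static void enable(struct mock_dev *d)
{
	/* Guard against unbalanced calls, as pm_runtime_enable() does. */
	if (!d->disable_depth) {
		printf("warning: unbalanced enable\n");
		return;
	}
	if (--d->disable_depth > 0)
		return;
	/* The new check: enabling a blocked device triggers the warning. */
	if (d->last_status == RPM_BLOCKED)
		printf("warning: attempt to enable runtime PM when it is blocked\n");
	d->last_status = RPM_INVALID;
}

int main(void)
{
	/* Fresh device: runtime PM disabled once at init, never enabled. */
	struct mock_dev d = { .disable_depth = 1, .last_status = RPM_INVALID };

	block_if_disabled(&d);	/* device_prepare() during system suspend */
	enable(&d);		/* misbehaving code path: warning fires */
	unblock(&d);		/* device_complete(): no-op, block consumed */
	return 0;
}
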
 void __pm_runtime_disable(struct device *dev, bool check_resume)
 {
        spin_lock_irq(&dev->power.lock);

 void pm_runtime_enable(struct device *dev)
 {

        if (--dev->power.disable_depth > 0)
                goto out;
 
+       if (dev->power.last_status == RPM_BLOCKED) {
+               dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
+               dump_stack();
+       }
        dev->power.last_status = RPM_INVALID;
        dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
 
 
        RPM_RESUMING,
        RPM_SUSPENDED,
        RPM_SUSPENDING,
+       RPM_BLOCKED,
 };
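
Note that RPM_BLOCKED is appended after the existing states and, in the hunks
above, is only ever stored in dev->power.last_status while runtime PM is
disabled; it never becomes a device's live runtime_status, so the existing
status-based checks are unaffected.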
 
 /*
 
 extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
 extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
 extern int pm_runtime_barrier(struct device *dev);
+extern void pm_runtime_block_if_disabled(struct device *dev);
+extern void pm_runtime_unblock(struct device *dev);
 extern void pm_runtime_enable(struct device *dev);
 extern void __pm_runtime_disable(struct device *dev, bool check_resume);
 extern void pm_runtime_allow(struct device *dev);
 static inline int __pm_runtime_set_status(struct device *dev,
                                            unsigned int status) { return 0; }
 static inline int pm_runtime_barrier(struct device *dev) { return 0; }
+static inline void pm_runtime_block_if_disabled(struct device *dev) {}
+static inline void pm_runtime_unblock(struct device *dev) {}
 static inline void pm_runtime_enable(struct device *dev) {}
 static inline void __pm_runtime_disable(struct device *dev, bool c) {}
 static inline void pm_runtime_allow(struct device *dev) {}