typedef int (*pm_callback_t)(struct device *);
 
+/*
+ * Walk a device-links list under RCU.  The extra cond argument,
+ * device_links_read_lock_held(), tells the RCU list-debugging checks
+ * (CONFIG_PROVE_RCU_LIST) that holding the device-links read lock is
+ * also a legitimate way to protect these traversals, avoiding
+ * false-positive lockdep splats when the walk is done under that lock
+ * rather than rcu_read_lock().
+ */
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+       list_for_each_entry_rcu(pos, head, member, \
+                       device_links_read_lock_held())
+
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
         * callbacks freeing the link objects for the links in the list we're
         * walking.
         */
-       list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+       list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->supplier, async);
 
         * continue instead of trying to continue in parallel with its
         * unregistration).
         */
-       list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+       list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->consumer, async);
 
 
        idx = device_links_read_lock();
 
-       list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+       list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
                link->supplier->power.must_resume = true;
 
        device_links_read_unlock(idx);
 
        idx = device_links_read_lock();
 
-       list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+       list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
                spin_lock_irq(&link->supplier->power.lock);
                link->supplier->power.direct_complete = false;
                spin_unlock_irq(&link->supplier->power.lock);