* The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
 
        if (!dev->power.is_noirq_suspended)
                goto Out;
 
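+       /* The resume of @dev must wait for its parent to finish resuming. */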
+       dpm_wait(dev->parent, async);
+
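+       /* Power domains take precedence when selecting the "noirq" callback. */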
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        }
 
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_noirq_suspended = false;
 
  Out:
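+       /* Let any threads waiting for @dev in dpm_wait() proceed. */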
+       complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
 }
 
+static bool is_async(struct device *dev)
+{
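+       /* PM tracing relies on a deterministic resume order, so keep it synchronous. */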
+       return dev->power.async_suspend && pm_async_enabled
+               && !pm_trace_is_enabled();
+}
+
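+/*
+ * Entry point for the async threads scheduled in dpm_resume_noirq(); drops
+ * the device reference taken when the thread was scheduled.
+ */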
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+       int error;
+
+       error = device_resume_noirq(dev, pm_transition, true);
+       if (error)
+               pm_dev_err(dev, pm_transition, " async", error);
+
+       put_device(dev);
+}
+
 /**
  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  */
 static void dpm_resume_noirq(pm_message_t state)
 {
+       struct device *dev;
        ktime_t starttime = ktime_get();
 
        mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_noirq_list)) {
-               struct device *dev = to_device(dpm_noirq_list.next);
-               int error;
+       pm_transition = state;
+
+       /*
+        * Schedule the async threads upfront so that their start is not
+        * delayed by devices that resume synchronously.
+        */
+       list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+               reinit_completion(&dev->power.completion);
+               if (is_async(dev)) {
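+                       /* Pin @dev for the async thread; dropped in async_resume_noirq(). */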
+                       get_device(dev);
+                       async_schedule(async_resume_noirq, dev);
+               }
+       }
 
+       while (!list_empty(&dpm_noirq_list)) {
+               dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);
 
-               error = device_resume_noirq(dev, state);
-               if (error) {
-                       suspend_stats.failed_resume_noirq++;
-                       dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-                       dpm_save_failed_dev(dev_name(dev));
-                       pm_dev_err(dev, state, " noirq", error);
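+               /* Devices flagged for async resume are handled by their own threads. */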
+               if (!is_async(dev)) {
+                       int error;
+
+                       error = device_resume_noirq(dev, state, false);
+                       if (error) {
+                               suspend_stats.failed_resume_noirq++;
+                               dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+                               dpm_save_failed_dev(dev_name(dev));
+                               pm_dev_err(dev, state, " noirq", error);
+                       }
                }
 
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
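+       /* Wait for all async resume threads started above to finish. */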
+       async_synchronize_full();
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
 }
 
-static bool is_async(struct device *dev)
-{
-       return dev->power.async_suspend && pm_async_enabled
-               && !pm_trace_is_enabled();
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.