if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
            (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
             (battery->capacity_now <= battery->alarm)))
-               pm_wakeup_event(&battery->device->dev, 0);
+               acpi_pm_wakeup_event(&battery->device->dev);
 
        return result;
 }
 
        }
 
        if (state)
-               pm_wakeup_event(&device->dev, 0);
+               acpi_pm_wakeup_event(&device->dev);
 
        ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
        if (ret == NOTIFY_DONE)
                } else {
                        int keycode;
 
-                       pm_wakeup_event(&device->dev, 0);
+                       acpi_pm_wakeup_event(&device->dev);
                        if (button->suspended)
                                break;
 
                lid_device = device;
        }
 
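+       /* Make the button/lid a wakeup device so its events can wake the system. */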
+       device_init_wakeup(&device->dev, true);
        printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
        return 0;
 
 
 #include <linux/pm_qos.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
+#include <linux/suspend.h>
 
 #include "internal.h"
 
 #ifdef CONFIG_PM
 static DEFINE_MUTEX(acpi_pm_notifier_lock);
 
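+/*
+ * Report a wakeup event for @dev.  While a suspend-to-idle wakeup is being
+ * processed (acpi_s2idle_wakeup()), the event is reported as a "hard" one,
+ * i.e. it aborts the suspend even if no wakeup source is armed for @dev.
+ */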
+void acpi_pm_wakeup_event(struct device *dev)
+{
+       pm_wakeup_dev_event(dev, 0, acpi_s2idle_wakeup());
+}
+EXPORT_SYMBOL_GPL(acpi_pm_wakeup_event);
+
 static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
 {
        struct acpi_device *adev;
        mutex_lock(&acpi_pm_notifier_lock);
 
        if (adev->wakeup.flags.notifier_present) {
-               __pm_wakeup_event(adev->wakeup.ws, 0);
+               pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
                if (adev->wakeup.context.func)
                        adev->wakeup.context.func(&adev->wakeup.context);
        }
 
                                   Suspend/Resume
   -------------------------------------------------------------------------- */
 #ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
+extern bool acpi_s2idle_wakeup(void);
 extern int acpi_sleep_init(void);
 #else
+static inline bool acpi_s2idle_wakeup(void) { return false; }
 static inline int acpi_sleep_init(void) { return -ENXIO; }
 #endif
 
 
        .recover = acpi_pm_finish,
 };
 
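+/*
+ * Set by acpi_freeze_wake() when the SCI has triggered while suspended and
+ * cleared by acpi_freeze_sync() once all pending events have been processed.
+ */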
+static bool s2idle_wakeup;
+
 static int acpi_freeze_begin(void)
 {
        acpi_scan_lock_acquire();
        return 0;
 }
 
+static void acpi_freeze_wake(void)
+{
+       /*
+        * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
+        * that the SCI has triggered while suspended, so cancel the wakeup in
+        * case it has not been a wakeup event (the GPEs will be checked later).
+        */
+       if (acpi_sci_irq_valid() &&
+           !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
+               pm_system_cancel_wakeup();
+               s2idle_wakeup = true;
+       }
+}
+
+static void acpi_freeze_sync(void)
+{
+       /*
+        * Process all pending events in case there are any wakeup ones.
+        *
+        * The EC driver uses the system workqueue, so that one needs to be
+        * flushed too.
+        */
+       acpi_os_wait_events_complete();
+       flush_scheduled_work();
+       s2idle_wakeup = false;
+}
+
 static void acpi_freeze_restore(void)
 {
        if (acpi_sci_irq_valid())
 static const struct platform_freeze_ops acpi_freeze_ops = {
        .begin = acpi_freeze_begin,
        .prepare = acpi_freeze_prepare,
+       .wake = acpi_freeze_wake,
+       .sync = acpi_freeze_sync,
        .restore = acpi_freeze_restore,
        .end = acpi_freeze_end,
 };
 }
 
 #else /* !CONFIG_SUSPEND */
+#define s2idle_wakeup  (false)
 static inline void acpi_sleep_suspend_setup(void) {}
 #endif /* !CONFIG_SUSPEND */
 
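+/* Tell the ACPI core whether a suspend-to-idle wakeup is being processed. */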
+bool acpi_s2idle_wakeup(void)
+{
+       return s2idle_wakeup;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static u32 saved_bm_rld;
 
 
        if (async_error)
                goto Complete;
 
-       if (pm_wakeup_pending()) {
-               async_error = -EBUSY;
-               goto Complete;
-       }
-
        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;
 
 
 /* First wakeup IRQ seen by the kernel in the last cycle. */
 unsigned int pm_wakeup_irq __read_mostly;
 
-/* If set and the system is suspending, terminate the suspend. */
-static bool pm_abort_suspend __read_mostly;
+/* If greater than 0 and the system is suspending, terminate the suspend. */
+static atomic_t pm_abort_suspend __read_mostly;
 
 /*
  * Combined counters of registered wakeup events and wakeup events in progress.
                pm_print_active_wakeup_sources();
        }
 
-       return ret || pm_abort_suspend;
+       return ret || atomic_read(&pm_abort_suspend) > 0;
 }
 
 void pm_system_wakeup(void)
 {
-       pm_abort_suspend = true;
+       atomic_inc(&pm_abort_suspend);
        freeze_wake();
 }
 EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
-void pm_wakeup_clear(void)
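+/*
+ * Revert one pm_system_wakeup(), so that a wakeup reported speculatively
+ * (e.g. for the SCI during suspend-to-idle) can be withdrawn if it turns
+ * out not to be a genuine wakeup event.
+ */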
+void pm_system_cancel_wakeup(void)
+{
+       atomic_dec(&pm_abort_suspend);
+}
+
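+/* Forget the last wakeup IRQ; also reset the abort counter if @reset is set. */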
+void pm_wakeup_clear(bool reset)
 {
-       pm_abort_suspend = false;
        pm_wakeup_irq = 0;
+       if (reset)
+               atomic_set(&pm_abort_suspend, 0);
 }
 
 void pm_system_irq_wakeup(unsigned int irq_number)
 
 #endif
 
 #ifdef CONFIG_PM
+void acpi_pm_wakeup_event(struct device *dev);
 acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
                        void (*func)(struct acpi_device_wakeup_context *context));
 acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
 int acpi_pm_device_sleep_state(struct device *, int *, int);
 int acpi_pm_device_run_wake(struct device *, bool);
 #else
+static inline void acpi_pm_wakeup_event(struct device *dev)
+{
+}
 static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
                                               struct device *dev,
-                                              void (*work_func)(struct work_struct *work))
+                                              void (*func)(struct acpi_device_wakeup_context *context))
 {
        return AE_SUPPORT;
 }
 
 struct platform_freeze_ops {
        int (*begin)(void);
        int (*prepare)(void);
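+       /* Called by s2idle_loop() right after leaving the frozen state. */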
+       void (*wake)(void);
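+       /* Called by s2idle_loop() after noirq resume to flush pending events. */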
+       void (*sync)(void);
        void (*restore)(void);
        void (*end)(void);
 };
 
 extern bool pm_wakeup_pending(void);
 extern void pm_system_wakeup(void);
-extern void pm_wakeup_clear(void);
+extern void pm_system_cancel_wakeup(void);
+extern void pm_wakeup_clear(bool reset);
 extern void pm_system_irq_wakeup(unsigned int irq_number);
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
 
 static inline bool pm_wakeup_pending(void) { return false; }
 static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(void) {}
+static inline void pm_wakeup_clear(bool reset) {}
 static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
 
 static inline void lock_system_sleep(void) {}
 
        if (!pm_freezing)
                atomic_inc(&system_freezing_cnt);
 
-       pm_wakeup_clear();
+       pm_wakeup_clear(true);
        pr_info("Freezing user space processes ... ");
        pm_freezing = true;
        error = try_to_freeze_tasks(true);
 
 
 static void freeze_enter(void)
 {
+       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
+
        spin_lock_irq(&suspend_freeze_lock);
        if (pm_wakeup_pending())
                goto out;
 
        /* Push all the CPUs into the idle loop. */
        wake_up_all_idle_cpus();
-       pr_debug("PM: suspend-to-idle\n");
        /* Make the current CPU wait so it can enter the idle loop too. */
        wait_event(suspend_freeze_wait_head,
                   suspend_freeze_state == FREEZE_STATE_WAKE);
-       pr_debug("PM: resume from suspend-to-idle\n");
 
        cpuidle_pause();
        put_online_cpus();
  out:
        suspend_freeze_state = FREEZE_STATE_NONE;
        spin_unlock_irq(&suspend_freeze_lock);
+
+       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
+}
+
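+/*
+ * Stay in the frozen state until a genuine wakeup event is pending: after
+ * each exit from freeze_enter(), let the platform inspect the wakeup
+ * (->wake), resume devices at the noirq level and flush pending event
+ * processing (->sync), and go back to sleep unless pm_wakeup_pending().
+ */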
+static void s2idle_loop(void)
+{
+       pr_debug("PM: suspend-to-idle\n");
+
+       do {
+               freeze_enter();
+
+               if (freeze_ops && freeze_ops->wake)
+                       freeze_ops->wake();
+
+               dpm_resume_noirq(PMSG_RESUME);
+               if (freeze_ops && freeze_ops->sync)
+                       freeze_ops->sync();
+
+               if (pm_wakeup_pending())
+                       break;
+
+               pm_wakeup_clear(false);
+       } while (!dpm_suspend_noirq(PMSG_SUSPEND));
+
+       pr_debug("PM: resume from suspend-to-idle\n");
 }
 
 void freeze_wake(void)
         * all the devices are suspended.
         */
        if (state == PM_SUSPEND_FREEZE) {
-               trace_suspend_resume(TPS("machine_suspend"), state, true);
-               freeze_enter();
-               trace_suspend_resume(TPS("machine_suspend"), state, false);
-               goto Platform_wake;
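+               /*
+                * s2idle_loop() has already resumed devices at the noirq
+                * level, hence the different resume target below.
+                */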
+               s2idle_loop();
+               goto Platform_early_resume;
        }
 
        error = disable_nonboot_cpus();