A: The following happen, listed in no particular order :-)
 
 - A notification is sent to in-kernel registered modules via the event
-  CPU_DOWN_PREPARE
+  CPU_DOWN_PREPARE or CPU_DOWN_PREPARE_FROZEN, depending on whether the CPU is
+  being offlined while tasks are frozen due to a suspend operation in progress
 - All processes are migrated away from this outgoing CPU to a new CPU
 - All interrupts targeted at this CPU are migrated to a new CPU
 - timers/bottom halves/tasklets are also migrated to a new CPU
 - Once all services are migrated, the kernel calls an arch-specific routine,
   __cpu_disable(), to perform arch-specific cleanup.
 - Once this is successful, notification of the successful cleanup is sent via
-  the event CPU_DEAD.
+  the event CPU_DEAD (or CPU_DEAD_FROZEN if tasks are frozen due to a suspend
+  while the CPU is being offlined).
 
   "It is expected that each service cleans up when the CPU_DOWN_PREPARE
   notifier is called; when CPU_DEAD is called, it is expected there is nothing
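
   For reference, a minimal sketch (not part of this patch) of a complete
   notifier handling both the normal and the _FROZEN variant of each event,
   registered the usual way; the foobar_* helpers are hypothetical, matching
   the ones in the first hunk below:

	static int __cpuinit foobar_cpu_callback(struct notifier_block *nfb,
						 unsigned long action,
						 void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			foobar_online_action(cpu);	/* hypothetical */
			break;
		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			foobar_dead_action(cpu);	/* hypothetical */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block __cpuinitdata foobar_cpu_notifier = {
		.notifier_call = foobar_cpu_callback,
	};

	static int __init foobar_init(void)
	{
		/* The usual registration pattern at subsystem init. */
		register_hotcpu_notifier(&foobar_cpu_notifier);
		return 0;
	}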
 
                switch (action) {
                case CPU_ONLINE:
+               case CPU_ONLINE_FROZEN:
                        foobar_online_action(cpu);
                        break;
                case CPU_DEAD:
+               case CPU_DEAD_FROZEN:
                        foobar_dead_action(cpu);
                        break;
                }
 
        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
                break;
        }
 
        mutex_lock(&therm_cpu_lock);
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                err = thermal_throttle_add_dev(sys_dev);
                WARN_ON(err);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                thermal_throttle_remove_dev(sys_dev);
                break;
        }
 
 
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                cpuid_device_create(cpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
                break;
        }
 
        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
        case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
                mc_sysdev_add(sys_dev);
                break;
        case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
                mc_sysdev_remove(sys_dev);
                break;
        }
 
 
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                msr_device_create(cpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
                break;
        }
 
        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                err_inject_add_dev(sys_dev);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                err_inject_remove_dev(sys_dev);
                break;
        }
 
 
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                create_palinfo_proc_entries(hotcpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                remove_palinfo_proc_entries(hotcpu);
                break;
        }
 
        struct salinfo_data *data;
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                spin_lock_irqsave(&data_saved_lock, flags);
                for (i = 0, data = salinfo_data;
                     i < ARRAY_SIZE(salinfo_data);
                spin_unlock_irqrestore(&data_saved_lock, flags);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                spin_lock_irqsave(&data_saved_lock, flags);
                for (i = 0, data = salinfo_data;
                     i < ARRAY_SIZE(salinfo_data);
 
        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
                break;
        }
 
 
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                register_cpu_online(cpu);
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                unregister_cpu_online(cpu);
                break;
 #endif
 
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                break;
                ret = NOTIFY_OK;
 
 {
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                appldata_online_cpu((long) hcpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                appldata_offline_cpu((long) hcpu);
                break;
        default:
 
 
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                if (sysdev_create_file(s, &attr_capability))
                        return NOTIFY_BAD;
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                sysdev_remove_file(s, &attr_capability);
                break;
        }
 
 
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                mce_create_device(cpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                mce_remove_device(cpu);
                break;
        }
 
 
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                threshold_create_device(cpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                threshold_remove_device(cpu);
                break;
        default:
 
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
        long cpu = (long)arg;
-       if (action == CPU_ONLINE)
+       if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
                smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
        return NOTIFY_DONE;
 }
 
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
-       if (action == CPU_DEAD) {
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
 
                local_irq_disable();
 
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                rc = topology_add_dev(cpu);
                break;
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                topology_remove_dev(cpu);
                break;
        }
 
        if (sys_dev) {
                switch (action) {
                case CPU_ONLINE:
+               case CPU_ONLINE_FROZEN:
                        cpufreq_add_dev(sys_dev);
                        break;
                case CPU_DOWN_PREPARE:
+               case CPU_DOWN_PREPARE_FROZEN:
                        if (unlikely(lock_policy_rwsem_write(cpu)))
                                BUG();
 
                        __cpufreq_remove_dev(sys_dev);
                        break;
                case CPU_DOWN_FAILED:
+               case CPU_DOWN_FAILED_FROZEN:
                        cpufreq_add_dev(sys_dev);
                        break;
                }
 
 
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                cpufreq_update_policy(cpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                cpufreq_stats_free_table(cpu);
                break;
        }
 
 
        switch (action) {
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                coretemp_device_add(cpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                coretemp_device_remove(cpu);
                break;
        }
 
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
                if(!create_comp_task(pool, cpu)) {
                        ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
                }
                break;
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
                cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
                kthread_bind(cct->task, any_online_cpu(cpu_online_map));
                destroy_comp_task(pool, cpu);
                break;
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
                cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
                kthread_bind(cct->task, cpu);
                wake_up_process(cct->task);
                break;
        case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
                break;
        case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
                destroy_comp_task(pool, cpu);
                take_over_work(pool, cpu);
 
 
        switch (val) {
        case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                decache_vcpus_on_cpu(cpu);
                                         NULL, 0, 1);
                break;
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
 
 static int buffer_cpu_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
 {
-       if (action == CPU_DEAD)
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                buffer_exit_cpu((unsigned long)hcpu);
        return NOTIFY_OK;
 }
 
                        per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                /* Easy Case - initialize the area and locks, and
                 * then rebalance when online does everything else for us. */
                memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
                break;
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                xfs_icsb_lock(mp);
                xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
                xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
                xfs_icsb_unlock(mp);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                /* Disable all the counters, then fold the dead cpu's
                 * count into the total on the global superblock and
                 * re-enable the counters. */
 
 #define CPU_LOCK_ACQUIRE       0x0008 /* Acquire all hotcpu locks */
 #define CPU_LOCK_RELEASE       0x0009 /* Release all hotcpu locks */
 
+/* Used for CPU hotplug events occurring while tasks are frozen due to a
+ * suspend operation in progress
+ */
+#define CPU_TASKS_FROZEN       0x0010
+
+#define CPU_ONLINE_FROZEN      (CPU_ONLINE | CPU_TASKS_FROZEN)
+#define CPU_UP_PREPARE_FROZEN  (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
+#define CPU_DOWN_PREPARE_FROZEN        (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
+#define CPU_DEAD_FROZEN                (CPU_DEAD | CPU_TASKS_FROZEN)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_NOTIFIER_H */
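
Since each _FROZEN value is just its base event with the CPU_TASKS_FROZEN bit
OR'd in, a notifier that does not need to distinguish the two can mask the bit
off once instead of duplicating every case label; the workqueue hunk later in
this patch does exactly that. A minimal sketch, with hypothetical
example_online()/example_offline() helpers:

	static int example_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		/* Fold the _FROZEN variants onto their base events. */
		action &= ~CPU_TASKS_FROZEN;

		switch (action) {
		case CPU_ONLINE:
			example_online(cpu);	/* hypothetical */
			break;
		case CPU_DEAD:
			example_offline(cpu);	/* hypothetical */
			break;
		}
		return NOTIFY_OK;
	}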
 
 }
 
 /* Requires cpu_add_remove_lock to be held */
-static int _cpu_down(unsigned int cpu)
+static int _cpu_down(unsigned int cpu, int tasks_frozen)
 {
        int err, nr_calls = 0;
        struct task_struct *p;
        cpumask_t old_allowed, tmp;
        void *hcpu = (void *)(long)cpu;
+       unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 
        if (num_online_cpus() == 1)
                return -EBUSY;
                return -EINVAL;
 
        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
-       err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+       err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                        hcpu, -1, &nr_calls);
        if (err == NOTIFY_BAD) {
-               __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu,
-                                         nr_calls, NULL);
+               __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+                                         hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __FUNCTION__, cpu);
                err = -EINVAL;
 
        if (IS_ERR(p) || cpu_online(cpu)) {
                /* CPU didn't die: tell everyone.  Can't complain. */
-               if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+               if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();
 
        __cpu_die(cpu);
 
        /* CPU is completely dead: tell everyone.  Too late to complain. */
-       if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu) == NOTIFY_BAD)
+       if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
+                                   hcpu) == NOTIFY_BAD)
                BUG();
 
        check_for_tasks(cpu);
 out_allowed:
        set_cpus_allowed(current, old_allowed);
 out_release:
-       raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE,
-                                               (void *)(long)cpu);
+       raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
        return err;
 }
 
        if (cpu_hotplug_disabled)
                err = -EBUSY;
        else
-               err = _cpu_down(cpu);
+               err = _cpu_down(cpu, 0);
 
        mutex_unlock(&cpu_add_remove_lock);
        return err;
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /* Requires cpu_add_remove_lock to be held */
-static int __cpuinit _cpu_up(unsigned int cpu)
+static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 {
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
+       unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 
        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;
 
        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
-       ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu,
+       ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
                                                        -1, &nr_calls);
        if (ret == NOTIFY_BAD) {
                printk("%s: attempt to bring up CPU %u failed\n",
        BUG_ON(!cpu_online(cpu));
 
        /* Now call notifier in preparation. */
-       raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+       raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
 
 out_notify:
        if (ret != 0)
                __raw_notifier_call_chain(&cpu_chain,
-                               CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+                               CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 
        return ret;
        if (cpu_hotplug_disabled)
                err = -EBUSY;
        else
-               err = _cpu_up(cpu);
+               err = _cpu_up(cpu, 0);
 
        mutex_unlock(&cpu_add_remove_lock);
        return err;
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
-               error = _cpu_down(cpu);
+               error = _cpu_down(cpu, 1);
                if (!error) {
                        cpu_set(cpu, frozen_cpus);
                        printk("CPU%d is down\n", cpu);
        suspend_cpu_hotplug = 1;
        printk("Enabling non-boot CPUs ...\n");
        for_each_cpu_mask(cpu, frozen_cpus) {
-               error = _cpu_up(cpu);
+               error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
                        continue;
 
        switch (action) {
 
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                init_hrtimers_cpu(cpu);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
                migrate_hrtimers(cpu);
                break;
 
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                node = cpu_to_node(cpu);
                per_cpu(cpu_profile_flip, cpu) = 0;
                if (!per_cpu(cpu_profile_hits, cpu)[1]) {
                __free_page(page);
                return NOTIFY_BAD;
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                cpu_set(cpu, prof_cpu_mask);
                break;
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                cpu_clear(cpu, prof_cpu_mask);
                if (per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
 
        long cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                rcu_online_cpu(cpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                rcu_offline_cpu(cpu);
                break;
        default:
 
 
        switch(action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&relay_channels_mutex);
                list_for_each_entry(chan, &relay_channels, list) {
                        if (chan->buf[hotcpu])
                mutex_unlock(&relay_channels_mutex);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                /* No need to flush the cpu : will be flushed upon
                 * final relay_flush() call. */
                break;
 
                break;
 
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
                if (IS_ERR(p))
                        return NOTIFY_BAD;
                break;
 
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
 		/* Strictly unnecessary, as first user will wake it. */
                wake_up_process(cpu_rq(cpu)->migration_thread);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                if (!cpu_rq(cpu)->migration_thread)
                        break;
                /* Unbind it from offline cpu so it can run.  Fall thru. */
                break;
 
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                migrate_live_tasks(cpu);
                rq = cpu_rq(cpu);
                kthread_stop(rq->migration_thread);
 {
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
        case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
                detach_destroy_domains(&cpu_online_map);
                return NOTIFY_OK;
 
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                /*
                 * Fall through and re-initialise the domains.
                 */
 
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             any_online_cpu(cpu_online_map));
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                kthread_stop(p);
 
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                BUG_ON(per_cpu(watchdog_task, hotcpu));
                p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
                if (IS_ERR(p)) {
                kthread_bind(p, hotcpu);
                break;
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(watchdog_task, hotcpu));
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(watchdog_task, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(watchdog_task, hotcpu),
                             any_online_cpu(cpu_online_map));
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                p = per_cpu(watchdog_task, hotcpu);
                per_cpu(watchdog_task, hotcpu) = NULL;
                kthread_stop(p);
 
        long cpu = (long)hcpu;
        switch(action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                if (init_timers_cpu(cpu) < 0)
                        return NOTIFY_BAD;
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                migrate_timers(cpu);
                break;
 #endif
 
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
 
+       action &= ~CPU_TASKS_FROZEN;
+
        switch (action) {
        case CPU_LOCK_ACQUIRE:
                mutex_lock(&workqueue_mutex);
 
        struct radix_tree_preload *rtp;
 
 	/* Free per-cpu pool of preloaded nodes */
-       if (action == CPU_DEAD) {
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                rtp = &per_cpu(radix_tree_preloads, cpu);
                while (rtp->nr) {
                        kmem_cache_free(radix_tree_node_cachep,
 
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                if (process_zones(cpu))
                        ret = NOTIFY_BAD;
                break;
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                free_zone_pagesets(cpu);
                break;
        default:
 {
        int cpu = (unsigned long)hcpu;
 
-       if (action == CPU_DEAD) {
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                local_irq_disable();
                __drain_pages(cpu);
                vm_events_fold_cpu(cpu);
 
                mutex_lock(&cache_chain_mutex);
                break;
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                /*
                 * We need to do this right in the beginning since
                 * alloc_arraycache's are going to use this list.
                }
                break;
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                start_cpu_timer(cpu);
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
                /*
                 * Shutdown cache reaper. Note that the cache_chain_mutex is
                 * held so that if cache_reap() is invoked it cannot do
                per_cpu(reap_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                /*
                 * Even if all the cpus of a node are down, we don't free the
                 * kmem_list3 of any cache. This to avoid a race between
                /* fall thru */
 #endif
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                list_for_each_entry(cachep, &cache_chain, next) {
                        struct array_cache *nc;
                        struct array_cache *shared;
 
 
        switch (action) {
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                for_all_slabs(__flush_cpu_slab, cpu);
                break;
        default:
 
        long *committed;
 
        committed = &per_cpu(committed_space, (long)hcpu);
-       if (action == CPU_DEAD) {
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                atomic_add(*committed, &vm_committed_space);
                *committed = 0;
                __lru_add_drain((long)hcpu);
 
        pg_data_t *pgdat;
        cpumask_t mask;
 
-       if (action == CPU_ONLINE) {
+       if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                for_each_online_pgdat(pgdat) {
                        mask = node_to_cpumask(pgdat->node_id);
                        if (any_online_cpu(mask) != NR_CPUS)
 
 {
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                refresh_zone_stat_thresholds();
                break;
        default:
 
        unsigned int cpu, oldcpu = (unsigned long)ocpu;
        struct softnet_data *sd, *oldsd;
 
-       if (action != CPU_DEAD)
+       if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                return NOTIFY_OK;
 
        local_irq_disable();
 
                          unsigned long action,
                          void *hcpu)
 {
-       if (action == CPU_DEAD)
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                __flow_cache_shrink((unsigned long)hcpu, 0);
        return NOTIFY_OK;
 }
 
 
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                if (!percpu_populate(iucv_irq_data,
                                     sizeof(struct iucv_irq_data),
                                     GFP_KERNEL|GFP_DMA, cpu))
                }
                break;
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                percpu_depopulate(iucv_param, cpu);
                percpu_depopulate(iucv_irq_data, cpu);
                break;
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
        case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
                smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
                break;
        case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
                cpumask = iucv_buffer_cpumask;
                cpu_clear(cpu, cpumask);
                if (cpus_empty(cpumask))