 #include <asm/power.h>
 #include <asm/mdesc.h>
 #include <asm/head.h>
+#include <asm/irq.h>
 
 #define DRV_MODULE_NAME                "ds"
 #define PFX DRV_MODULE_NAME    ": "
 
        kfree(resp);
 
+       /* Redistribute IRQs, taking into account the new cpus.  */
+       fixup_irqs();
+
        return 0;
 }
 
                              cpumask_t *mask)
 {
        struct ds_data *resp;
-       int resp_len, ncpus;
+       int resp_len, ncpus, cpu;
+       unsigned long flags;
 
        ncpus = cpus_weight(*mask);
        resp_len = dr_cpu_size_response(ncpus);
                             resp_len, ncpus, mask,
                             DR_CPU_STAT_UNCONFIGURED);
 
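+       /* Take each requested cpu down; a failed cpu_down() leaves
+        * that cpu marked as still configured in the response.
+        */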
+       for_each_cpu_mask(cpu, *mask) {
+               int err;
+
+               printk(KERN_INFO PFX "CPU[%d]: Shutting down cpu %d...\n",
+                      smp_processor_id(), cpu);
+               err = cpu_down(cpu);
+               if (err)
+                       dr_cpu_mark(resp, cpu, ncpus,
+                                   DR_CPU_RES_FAILURE,
+                                   DR_CPU_STAT_CONFIGURED);
+       }
+
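+       /* Send the response back under ds_lock with interrupts disabled.  */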
+       spin_lock_irqsave(&ds_lock, flags);
+       ds_send(ds_info->lp, resp, resp_len);
+       spin_unlock_irqrestore(&ds_lock, flags);
+
        kfree(resp);
 
-       return -EOPNOTSUPP;
+       return 0;
 }
 
 static void process_dr_cpu_list(struct ds_cap_state *cp)
 
 #include <linux/compat.h>
 #include <linux/tick.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
 
 #include <asm/oplib.h>
 #include <asm/uaccess.h>
 
 /* #define VERBOSE_SHOWREGS */
 
-static void sparc64_yield(void)
+static void sparc64_yield(int cpu)
 {
        if (tlb_type != hypervisor)
                return;
        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb__after_clear_bit();
 
-       while (!need_resched()) {
+       while (!need_resched() && !cpu_is_offline(cpu)) {
                unsigned long pstate;
 
                /* Disable interrupts. */
                        : "=&r" (pstate)
                        : "i" (PSTATE_IE));
 
-               if (!need_resched())
+               if (!need_resched() && !cpu_is_offline(cpu))
                        sun4v_cpu_yield();
 
                /* Re-enable interrupts. */
 /* The idle loop on sparc64. */
 void cpu_idle(void)
 {
+       int cpu = smp_processor_id();
+
        set_thread_flag(TIF_POLLING_NRFLAG);
 
        while(1) {
                tick_nohz_stop_sched_tick();
-               while (!need_resched())
-                       sparc64_yield();
+
+               while (!need_resched() && !cpu_is_offline(cpu))
+                       sparc64_yield(cpu);
+
                tick_nohz_restart_sched_tick();
 
                preempt_enable_no_resched();
+
+#ifdef CONFIG_HOTPLUG_CPU
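+               /* An offlined cpu never returns from cpu_play_dead().  */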
+               if (cpu_is_offline(cpu))
+                       cpu_play_dead();
+#endif
+
                schedule();
                preempt_disable();
        }
 
 #include <asm/prom.h>
 #include <asm/mdesc.h>
 #include <asm/ldc.h>
+#include <asm/hypervisor.h>
 
 extern void calibrate_delay(void);
 
 EXPORT_SYMBOL(cpu_core_map);
 
 static cpumask_t smp_commenced_mask;
-static cpumask_t cpu_callout_map;
 
 void smp_info(struct seq_file *m)
 {
                           i, cpu_data(i).clock_tick);
 }
 
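+/* call_lock serializes smp_call_function() and guards cpu_online_map
+ * updates, so a cpu cannot come or go in the middle of a cross call.
+ */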
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
+
 extern void setup_sparc64_timer(void);
 
 static volatile unsigned long callin_flag = 0;
        while (!cpu_isset(cpuid, smp_commenced_mask))
                rmb();
 
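+       /* Come online under call_lock so we do not race an
+        * in-progress cross call.
+        */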
+       spin_lock(&call_lock);
        cpu_set(cpuid, cpu_online_map);
+       spin_unlock(&call_lock);
 
        /* idle thread is expected to have preempt disabled */
        preempt_disable();
        hv_err = sun4v_cpu_start(cpu, trampoline_ra,
                                 kimage_addr_to_ra(&sparc64_ttable_tl0),
                                 __pa(hdesc));
+       if (hv_err)
+               printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
+                      "failed with error %lu\n", hv_err);
 }
 #endif
 
        p = fork_idle(cpu);
        callin_flag = 0;
        cpu_new_thread = task_thread_info(p);
-       cpu_set(cpu, cpu_callout_map);
 
        if (tlb_type == hypervisor) {
                /* Alloc the mondo queues, cpu will load them.  */
                ret = 0;
        } else {
                printk("Processor %d is stuck.\n", cpu);
-               cpu_clear(cpu, cpu_callout_map);
                ret = -ENODEV;
        }
        cpu_new_thread = NULL;
        int wait;
 };
 
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
 static struct call_data_struct *call_data;
 
 extern unsigned long xcall_call_function;
 {
        unsigned int i;
 
-       for_each_possible_cpu(i) {
+       for_each_present_cpu(i) {
                unsigned int j;
 
                cpus_clear(cpu_core_map[i]);
                        continue;
                }
 
-               for_each_possible_cpu(j) {
+               for_each_present_cpu(j) {
                        if (cpu_data(i).core_id ==
                            cpu_data(j).core_id)
                                cpu_set(j, cpu_core_map[i]);
                }
        }
 
-       for_each_possible_cpu(i) {
+       for_each_present_cpu(i) {
                unsigned int j;
 
                cpus_clear(cpu_sibling_map[i]);
                        continue;
                }
 
-               for_each_possible_cpu(j) {
+               for_each_present_cpu(j) {
                        if (cpu_data(i).proc_id ==
                            cpu_data(j).proc_id)
                                cpu_set(j, cpu_sibling_map[i]);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+void cpu_play_dead(void)
+{
+       int cpu = smp_processor_id();
+       unsigned long pstate;
+
+       idle_task_exit();
+
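+       /* On sun4v, unconfigure our mondo queues so the hypervisor
+        * stops delivering interrupts to this cpu.
+        */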
+       if (tlb_type == hypervisor) {
+               struct trap_per_cpu *tb = &trap_block[cpu];
+
+               sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
+                               tb->cpu_mondo_pa, 0);
+               sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
+                               tb->dev_mondo_pa, 0);
+               sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
+                               tb->resum_mondo_pa, 0);
+               sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
+                               tb->nonresum_mondo_pa, 0);
+       }
+
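+       /* Tell __cpu_die() that we are gone.  */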
+       cpu_clear(cpu, smp_commenced_mask);
+       membar_safe("#Sync");
+
+       local_irq_disable();
+
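+       /* local_irq_disable() only raises %pil; clear PSTATE_IE as
+        * well (wrpr xors its operands).
+        */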
+       __asm__ __volatile__(
+               "rdpr   %%pstate, %0\n\t"
+               "wrpr   %0, %1, %%pstate"
+               : "=r" (pstate)
+               : "i" (PSTATE_IE));
+
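+       /* Spin here; on LDOM systems __cpu_die() will stop us
+        * via sun4v_cpu_stop().
+        */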
+       while (1)
+               barrier();
+}
+
 int __cpu_disable(void)
 {
-       printk(KERN_ERR "SMP: __cpu_disable() on cpu %d\n",
-              smp_processor_id());
-       return -ENODEV;
+       int cpu = smp_processor_id();
+       cpuinfo_sparc *c;
+       int i;
+
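+       /* Drop this cpu from the core and sibling maps.  */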
+       for_each_cpu_mask(i, cpu_core_map[cpu])
+               cpu_clear(cpu, cpu_core_map[i]);
+       cpus_clear(cpu_core_map[cpu]);
+
+       for_each_cpu_mask(i, cpu_sibling_map[cpu])
+               cpu_clear(cpu, cpu_sibling_map[i]);
+       cpus_clear(cpu_sibling_map[cpu]);
+
+       c = &cpu_data(cpu);
+
+       c->core_id = 0;
+       c->proc_id = -1;
+
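+       /* Leave cpu_online_map under call_lock so in-flight cross
+        * calls see a consistent view of the online cpus.
+        */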
+       spin_lock(&call_lock);
+       cpu_clear(cpu, cpu_online_map);
+       spin_unlock(&call_lock);
+
+       smp_wmb();
+
+       /* Make sure no interrupts point to this cpu.  */
+       fixup_irqs();
+
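+       /* Briefly re-enable interrupts so anything already in
+        * flight toward this cpu gets serviced.
+        */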
+       local_irq_enable();
+       mdelay(1);
+       local_irq_disable();
+
+       return 0;
 }
 
 void __cpu_die(unsigned int cpu)
 {
-       printk(KERN_ERR "SMP: __cpu_die(%u)\n", cpu);
+       int i;
+
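+       /* Give the dying cpu up to ten seconds to clear itself
+        * from smp_commenced_mask.
+        */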
+       for (i = 0; i < 100; i++) {
+               smp_rmb();
+               if (!cpu_isset(cpu, smp_commenced_mask))
+                       break;
+               msleep(100);
+       }
+       if (cpu_isset(cpu, smp_commenced_mask)) {
+               printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+       } else {
+#if defined(CONFIG_SUN_LDOMS)
+               unsigned long hv_err;
+               int limit = 100;
+
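+               /* Stop the cpu in the hypervisor; only on success
+                * drop it from cpu_present_map.
+                */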
+               do {
+                       hv_err = sun4v_cpu_stop(cpu);
+                       if (hv_err == HV_EOK) {
+                               cpu_clear(cpu, cpu_present_map);
+                               break;
+                       }
+               } while (--limit > 0);
+               if (limit <= 0) {
+                       printk(KERN_ERR "sun4v_cpu_stop() failed, err=%lu\n",
+                              hv_err);
+               }
+#endif
+       }
 }
 #endif