smp_imb(void)
 {
        /* Must wait for other processors to flush their icache before continuing. */
-       if (on_each_cpu(ipi_imb, NULL, 1))
-               printk(KERN_CRIT "smp_imb: timed out\n");
+       on_each_cpu(ipi_imb, NULL, 1);
 }
 EXPORT_SYMBOL(smp_imb);
 
 {
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
-       if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
-               printk(KERN_CRIT "flush_tlb_all: timed out\n");
-       }
+       on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
                }
        }
 
-       if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
-               printk(KERN_CRIT "flush_tlb_mm: timed out\n");
-       }
+       smp_call_function(ipi_flush_tlb_mm, mm, 1);
 
        preempt_enable();
 }
        data.mm = mm;
        data.addr = addr;
 
-       if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
-               printk(KERN_CRIT "flush_tlb_page: timed out\n");
-       }
+       smp_call_function(ipi_flush_tlb_page, &data, 1);
 
        preempt_enable();
 }
                }
        }
 
-       if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
-               printk(KERN_CRIT "flush_icache_page: timed out\n");
-       }
+       smp_call_function(ipi_flush_icache_page, mm, 1);
 
        preempt_enable();
 }
 
        model->reg_setup(&reg, ctr, &sys);

        /* Configure the registers on all cpus.  */
-       (void)smp_call_function(model->cpu_setup, &reg, 1);
+       smp_call_function(model->cpu_setup, &reg, 1);
        model->cpu_setup(&reg);
        return 0;
 }
 static int
 op_axp_start(void)
 {
-       (void)smp_call_function(op_axp_cpu_start, NULL, 1);
+       smp_call_function(op_axp_cpu_start, NULL, 1);
        op_axp_cpu_start(NULL);
        return 0;
 }
 static void
 op_axp_stop(void)
 {
-       (void)smp_call_function(op_axp_cpu_stop, NULL, 1);
+       smp_call_function(op_axp_cpu_stop, NULL, 1);
        op_axp_cpu_stop(NULL);
 }
 
 
 
 int bL_switcher_trace_trigger(void)
 {
-       int ret;
-
        preempt_disable();
 
        bL_switcher_trace_trigger_cpu(NULL);
-       ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+       smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
 
        preempt_enable();
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
 
 
        }
 
        /* save the current system wide pmu states */
-       ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
-       if (ret) {
-               DPRINT(("on_each_cpu() failed: %d\n", ret));
-               goto cleanup_reserve;
-       }
+       on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 
        /* officially change to the alternate interrupt handler */
        pfm_alt_intr_handler = hdl;
 pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 {
        int i;
-       int ret;
 
        if (hdl == NULL) return -EINVAL;
 
 
        pfm_alt_intr_handler = NULL;
 
-       ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
-       if (ret) {
-               DPRINT(("on_each_cpu() failed: %d\n", ret));
-       }
+       on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 
        for_each_online_cpu(i) {
                pfm_unreserve_session(NULL, 1, i);
 
        status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
        if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
                atomic_set(&uc_pool->status, 0);
-               status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
-               if (status || atomic_read(&uc_pool->status))
+               smp_call_function(uncached_ipi_visibility, uc_pool, 1);
+               if (atomic_read(&uc_pool->status))
                        goto failed;
        } else if (status != PAL_VISIBILITY_OK)
                goto failed;
        if (status != PAL_STATUS_SUCCESS)
                goto failed;
        atomic_set(&uc_pool->status, 0);
-       status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
-       if (status || atomic_read(&uc_pool->status))
+       smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
+       if (atomic_read(&uc_pool->status))
                goto failed;
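
With the cross-call's return value gone, a handler that can fail must report through shared state instead, which is what uncached.c does with uc_pool->status. A minimal sketch of that pattern, assuming a hypothetical per-CPU step do_local_work() and a hypothetical status word work_status:

static atomic_t work_status;

static void ipi_do_work(void *info)
{
	/* Runs on every CPU; the IPI itself can no longer fail,
	 * so any per-CPU error is recorded in shared state. */
	if (do_local_work(info))	/* hypothetical per-CPU step */
		atomic_inc(&work_status);
}

static int do_work_everywhere(void *info)
{
	atomic_set(&work_status, 0);
	on_each_cpu(ipi_do_work, info, 1);	/* wait for all CPUs */
	return atomic_read(&work_status) ? -EIO : 0;
}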
 
        /*
 
        /* Call function on all CPUs.  One of us will make the
         * rtas call
         */
-       if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
-               atomic_set(&data.error, -EINVAL);
+       on_each_cpu(rtas_percpu_suspend_me, &data, 0);
 
        wait_for_completion(&done);
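
rtas.c shows the asynchronous variant: with wait=0 the caller does not block on the IPIs at all and synchronizes through a completion instead, with errors again carried in the shared data structure. A reduced sketch under hypothetical names:

static DECLARE_COMPLETION(all_done);
static atomic_t pending;

static void do_step(void *info)
{
	/* each CPU does its part; the last one wakes the waiter */
	if (atomic_dec_and_test(&pending))
		complete(&all_done);
}

static void run_on_all_async(void *info)
{
	atomic_set(&pending, num_online_cpus());	/* assumes no hotplug race */
	on_each_cpu(do_step, info, 0);			/* don't wait for the IPIs */
	wait_for_completion(&all_done);
}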
 
 
 
 int wbinvd_on_all_cpus(void)
 {
-       return on_each_cpu(__wbinvd, NULL, 1);
+       on_each_cpu(__wbinvd, NULL, 1);
+       return 0;
 }
 EXPORT_SYMBOL(wbinvd_on_all_cpus);
 
 
 void global_cache_flush(void)
 {
-       if (on_each_cpu(ipi_handler, NULL, 1) != 0)
-               panic(PFX "timed out waiting for the other CPUs!\n");
+       on_each_cpu(ipi_handler, NULL, 1);
 }
 EXPORT_SYMBOL(global_cache_flush);
 
 
 /*
  * Call a function on all processors
  */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
+void on_each_cpu(smp_call_func_t func, void *info, int wait);
 
 /*
  * Call a function on processors specified by mask, which might include
 /*
  * Call a function on all other processors
  */
-int smp_call_function(smp_call_func_t func, void *info, int wait);
+void smp_call_function(smp_call_func_t func, void *info, int wait);
 void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait);
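
A usage sketch under the new prototypes (the handler name drain_local is hypothetical): callers now invoke the helpers purely for their side effects, and wait=1 still blocks until every CPU has run the function.

static void drain_local(void *unused)
{
	/* runs on each CPU, in IPI context on the remote ones */
}

static void drain_everywhere(void)
{
	on_each_cpu(drain_local, NULL, 1);	/* includes this CPU */
	/* nothing to check: the cross-call cannot fail or time out */
}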
 
  *     These macros fold the SMP functionality into a single CPU system
  */
 #define raw_smp_processor_id()                 0
-static inline int up_smp_call_function(smp_call_func_t func, void *info)
+static inline void up_smp_call_function(smp_call_func_t func, void *info)
 {
-       return 0;
 }
 #define smp_call_function(func, info, wait) \
                        (up_smp_call_function(func, info))
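
Because the stub is now void, any leftover `if (smp_call_function(...))` fails to build on UP just as it does on SMP; a correct caller compiles away entirely (sketch, names hypothetical):

static void poke(void *unused)
{
}

static void poke_others(void)
{
	/* UP: expands to up_smp_call_function() and does nothing,
	 * since there are no other processors to interrupt */
	smp_call_function(poke, NULL, 1);
}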
 
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(smp_call_func_t func, void *info, int wait)
+void smp_call_function(smp_call_func_t func, void *info, int wait)
 {
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();
-
-       return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
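
The context rule in the comment above is unchanged: process context, interrupts enabled. Since smp_call_function() skips the calling CPU, the callers in this patch pair it with a direct local call, e.g. (hypothetical names):

static void setup_one_cpu(void *info)
{
	/* per-CPU configuration step */
}

static void setup_all_cpus(void *info)
{
	smp_call_function(setup_one_cpu, info, 1);	/* remote CPUs */
	setup_one_cpu(info);				/* and ourselves */
}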
 
  * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
  * of local_irq_disable/enable().
  */
-int on_each_cpu(void (*func) (void *info), void *info, int wait)
+void on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
        unsigned long flags;
-       int ret = 0;
 
        preempt_disable();
-       ret = smp_call_function(func, info, wait);
+       smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
-       return ret;
 }
 EXPORT_SYMBOL(on_each_cpu);
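
Note that the local invocation above runs under local_irq_save(), so a handler passed to on_each_cpu() must be safe with interrupts disabled as well as in IPI context; per-CPU updates are fine, sleeping is not. A hypothetical handler:

static DEFINE_PER_CPU(unsigned long, events);

static void bump_events(void *unused)
{
	/* IRQs off locally, IPI context remotely: must not sleep */
	this_cpu_inc(events);
}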
 
 
 }
 EXPORT_SYMBOL(smp_call_function_single_async);
 
-int on_each_cpu(smp_call_func_t func, void *info, int wait)
+void on_each_cpu(smp_call_func_t func, void *info, int wait)
 {
        unsigned long flags;
 
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
-       return 0;
 }
 EXPORT_SYMBOL(on_each_cpu);