#include <linux/suspend.h>
 #include <linux/platform_device.h>
 #include <asm/appldata.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 /*
  * Timer
  */
-static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
-static atomic_t appldata_expire_count = ATOMIC_INIT(0);
+static struct vtimer_list appldata_timer;
 
 static DEFINE_SPINLOCK(appldata_timer_lock);
 static int appldata_interval = APPLDATA_CPU_INTERVAL;
  */
 static void appldata_timer_function(unsigned long data)
 {
-       if (atomic_dec_and_test(&appldata_expire_count)) {
-               atomic_set(&appldata_expire_count, num_online_cpus());
-               queue_work(appldata_wq, (struct work_struct *) data);
-       }
+       queue_work(appldata_wq, (struct work_struct *) data);
 }
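With one system-wide timer the callback no longer counts down per-CPU expirations; it simply defers to the workqueue. A minimal sketch of that pattern, assuming hypothetical example_* names and the generic system_wq standing in for appldata_wq (the work item is wired up in the setup sketch further down):

        #include <linux/workqueue.h>

        static struct work_struct example_work;         /* hypothetical work item */

        static void example_work_fn(struct work_struct *work)
        {
                /* the heavy lifting runs later, in process context */
        }

        static void example_timer_fn(unsigned long data)
        {
                /* vtimer callbacks run from the accounting path: keep them short */
                queue_work(system_wq, (struct work_struct *) data);
        }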
 
 /*
        struct list_head *lh;
        struct appldata_ops *ops;
 
-       get_online_cpus();
        mutex_lock(&appldata_ops_mutex);
        list_for_each(lh, &appldata_ops_list) {
                ops = list_entry(lh, struct appldata_ops, list);
                }
        }
        mutex_unlock(&appldata_ops_mutex);
-       put_online_cpus();
 }
 
 /*
 
 /****************************** /proc stuff **********************************/
 
-/*
- * appldata_mod_vtimer_wrap()
- *
- * wrapper function for mod_virt_timer(), because smp_call_function_single()
- * accepts only one parameter.
- */
-static void __appldata_mod_vtimer_wrap(void *p) {
-       struct {
-               struct vtimer_list *timer;
-               u64    expires;
-       } *args = p;
-       mod_virt_timer_periodic(args->timer, args->expires);
-}
-
 #define APPLDATA_ADD_TIMER     0
 #define APPLDATA_DEL_TIMER     1
 #define APPLDATA_MOD_TIMER     2
  * Add, delete or modify virtual timers on all online cpus.
  * The caller needs to get the appldata_timer_lock spinlock.
  */
-static void
-__appldata_vtimer_setup(int cmd)
+static void __appldata_vtimer_setup(int cmd)
 {
-       u64 per_cpu_interval;
-       int i;
+       u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;
 
        switch (cmd) {
        case APPLDATA_ADD_TIMER:
                if (appldata_timer_active)
                        break;
-               per_cpu_interval = (u64) (appldata_interval*1000 /
-                                         num_online_cpus()) * TOD_MICRO;
-               for_each_online_cpu(i) {
-                       per_cpu(appldata_timer, i).expires = per_cpu_interval;
-                       smp_call_function_single(i, add_virt_timer_periodic,
-                                                &per_cpu(appldata_timer, i),
-                                                1);
-               }
+               appldata_timer.expires = timer_interval;
+               add_virt_timer_periodic(&appldata_timer);
                appldata_timer_active = 1;
                break;
        case APPLDATA_DEL_TIMER:
-               for_each_online_cpu(i)
-                       del_virt_timer(&per_cpu(appldata_timer, i));
+               del_virt_timer(&appldata_timer);
                if (!appldata_timer_active)
                        break;
                appldata_timer_active = 0;
-               atomic_set(&appldata_expire_count, num_online_cpus());
                break;
        case APPLDATA_MOD_TIMER:
-               per_cpu_interval = (u64) (appldata_interval*1000 /
-                                         num_online_cpus()) * TOD_MICRO;
                if (!appldata_timer_active)
                        break;
-               for_each_online_cpu(i) {
-                       struct {
-                               struct vtimer_list *timer;
-                               u64    expires;
-                       } args;
-                       args.timer = &per_cpu(appldata_timer, i);
-                       args.expires = per_cpu_interval;
-                       smp_call_function_single(i, __appldata_mod_vtimer_wrap,
-                                                &args, 1);
-               }
+               mod_virt_timer_periodic(&appldata_timer, timer_interval);
        }
 }
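This hunk shows the whole driver-side life cycle of the reworked interface. A hedged usage sketch pulling the calls together, reusing the hypothetical example_work/example_work_fn from the earlier sketch; everything else comes from the patch (TOD_MICRO is the number of TOD clock units per microsecond, per asm/appldata.h):

        static struct vtimer_list example_timer;        /* hypothetical */

        static void example_setup(u64 interval_ms)
        {
                /* milliseconds -> microseconds -> TOD clock units */
                u64 tod_interval = interval_ms * 1000 * TOD_MICRO;

                INIT_WORK(&example_work, example_work_fn);
                init_virt_timer(&example_timer);
                example_timer.function = example_timer_fn;
                example_timer.data = (unsigned long) &example_work;
                example_timer.expires = tod_interval;
                add_virt_timer_periodic(&example_timer);                /* start */
                mod_virt_timer_periodic(&example_timer, 2 * tod_interval); /* re-program */
                del_virt_timer(&example_timer);                         /* stop */
        }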
 
        len = *lenp;
        if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
                return -EFAULT;
-       get_online_cpus();
        spin_lock(&appldata_timer_lock);
        if (buf[0] == '1')
                __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
        else if (buf[0] == '0')
                __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
        spin_unlock(&appldata_timer_lock);
-       put_online_cpus();
 out:
        *lenp = len;
        *ppos += len;
                goto out;
        }
        len = *lenp;
-       if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
+       if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
                return -EFAULT;
-       }
        interval = 0;
        sscanf(buf, "%i", &interval);
        if (interval <= 0)
                return -EINVAL;
 
-       get_online_cpus();
        spin_lock(&appldata_timer_lock);
        appldata_interval = interval;
        __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
        spin_unlock(&appldata_timer_lock);
-       put_online_cpus();
 out:
        *lenp = len;
        *ppos += len;
        int rc;
        struct list_head *lh;
 
-       get_online_cpus();
        spin_lock(&appldata_timer_lock);
        if (appldata_timer_active) {
                __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
                appldata_timer_suspended = 1;
        }
        spin_unlock(&appldata_timer_lock);
-       put_online_cpus();
 
        mutex_lock(&appldata_ops_mutex);
        list_for_each(lh, &appldata_ops_list) {
        int rc;
        struct list_head *lh;
 
-       get_online_cpus();
        spin_lock(&appldata_timer_lock);
        if (appldata_timer_suspended) {
                __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
                appldata_timer_suspended = 0;
        }
        spin_unlock(&appldata_timer_lock);
-       put_online_cpus();
 
        mutex_lock(&appldata_ops_mutex);
        list_for_each(lh, &appldata_ops_list) {
 
 /******************************* init / exit *********************************/
 
-static void __cpuinit appldata_online_cpu(int cpu)
-{
-       init_virt_timer(&per_cpu(appldata_timer, cpu));
-       per_cpu(appldata_timer, cpu).function = appldata_timer_function;
-       per_cpu(appldata_timer, cpu).data = (unsigned long)
-               &appldata_work;
-       atomic_inc(&appldata_expire_count);
-       spin_lock(&appldata_timer_lock);
-       __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
-       spin_unlock(&appldata_timer_lock);
-}
-
-static void __cpuinit appldata_offline_cpu(int cpu)
-{
-       del_virt_timer(&per_cpu(appldata_timer, cpu));
-       if (atomic_dec_and_test(&appldata_expire_count)) {
-               atomic_set(&appldata_expire_count, num_online_cpus());
-               queue_work(appldata_wq, &appldata_work);
-       }
-       spin_lock(&appldata_timer_lock);
-       __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
-       spin_unlock(&appldata_timer_lock);
-}
-
-static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
-                                        unsigned long action,
-                                        void *hcpu)
-{
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               appldata_online_cpu((long) hcpu);
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               appldata_offline_cpu((long) hcpu);
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata appldata_nb = {
-       .notifier_call = appldata_cpu_notify,
-};
-
 /*
  * appldata_init()
  *
  */
 static int __init appldata_init(void)
 {
-       int i, rc;
+       int rc;
+
+       appldata_timer.function = appldata_timer_function;
+       appldata_timer.data = (unsigned long) &appldata_work;
 
        rc = platform_driver_register(&appldata_pdrv);
        if (rc)
                goto out_device;
        }
 
-       get_online_cpus();
-       for_each_online_cpu(i)
-               appldata_online_cpu(i);
-       put_online_cpus();
-
-       /* Register cpu hotplug notifier */
-       register_hotcpu_notifier(&appldata_nb);
-
        appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
        return 0;
 
 
 /*
- *  arch/s390/kernel/vtime.c
  *    Virtual cpu timer based timer functions.
  *
- *  S390 version
- *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Copyright IBM Corp. 2004, 2012
  *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
  */
 
-#include <linux/module.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/types.h>
 #include <linux/timex.h>
-#include <linux/notifier.h>
-#include <linux/kernel_stat.h>
-#include <linux/rcupdate.h>
-#include <linux/posix-timers.h>
+#include <linux/types.h>
+#include <linux/time.h>
 #include <linux/cpu.h>
-#include <linux/kprobes.h>
+#include <linux/smp.h>
 
-#include <asm/timer.h>
 #include <asm/irq_regs.h>
 #include <asm/cputime.h>
+#include <asm/vtimer.h>
 #include <asm/irq.h>
 #include "entry.h"
 
-static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+static void virt_timer_expire(void);
 
 DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
-static inline __u64 get_vtimer(void)
+static LIST_HEAD(virt_timer_list);
+static DEFINE_SPINLOCK(virt_timer_lock);
+static atomic64_t virt_timer_current;
+static atomic64_t virt_timer_elapsed;
+
+static inline u64 get_vtimer(void)
 {
-       __u64 timer;
+       u64 timer;
 
-       asm volatile("STPT %0" : "=m" (timer));
+       asm volatile("stpt %0" : "=m" (timer));
        return timer;
 }
 
-static inline void set_vtimer(__u64 expires)
+static inline void set_vtimer(u64 expires)
 {
-       __u64 timer;
+       u64 timer;
 
-       asm volatile ("  STPT %0\n"  /* Store current cpu timer value */
-                     "  SPT %1"     /* Set new value immediately afterwards */
-                     : "=m" (timer) : "m" (expires) );
+       asm volatile(
+               "       stpt    %0\n"   /* Store current cpu timer value */
+               "       spt     %1"     /* Set new value imm. afterwards */
+               : "=m" (timer) : "m" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;
 }
 
+static inline int virt_timer_forward(u64 elapsed)
+{
+       BUG_ON(!irqs_disabled());
+
+       if (list_empty(&virt_timer_list))
+               return 0;
+       elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
+       return elapsed >= atomic64_read(&virt_timer_current);
+}
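virt_timer_forward() is the heart of the rework: instead of programming a per-CPU hardware timer, each accounting path adds the cpu time it just consumed to virt_timer_elapsed and compares the sum against virt_timer_current, the earliest queued expiry. A sketch of the caller pattern used further down (interrupts disabled, as the BUG_ON enforces):

        static void example_account_path(u64 user, u64 system)
        {
                /* mirrors account_process_tick()/do_account_vtime() below;
                 * runs with interrupts disabled */
                if (virt_timer_forward(user + system))
                        virt_timer_expire();
        }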
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_timer.
  */
-static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 {
        struct thread_info *ti = task_thread_info(tsk);
-       __u64 timer, clock, user, system, steal;
+       u64 timer, clock, user, system, steal;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
-       asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
-                     "  STCK %1"      /* Store current tod clock value */
-                     : "=m" (S390_lowcore.last_update_timer),
-                       "=m" (S390_lowcore.last_update_clock) );
+       asm volatile(
+               "       stpt    %0\n"   /* Store current cpu timer value */
+               "       stck    %1"     /* Store current tod clock value */
+               : "=m" (S390_lowcore.last_update_timer),
+                 "=m" (S390_lowcore.last_update_clock));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
                S390_lowcore.steal_timer = 0;
                account_steal_time(steal);
        }
+
+       return virt_timer_forward(user + system);
 }
 
 void account_vtime(struct task_struct *prev, struct task_struct *next)
 
 void account_process_tick(struct task_struct *tsk, int user_tick)
 {
-       do_account_vtime(tsk, HARDIRQ_OFFSET);
+       if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+               virt_timer_expire();
 }
 
 /*
 void account_system_vtime(struct task_struct *tsk)
 {
        struct thread_info *ti = task_thread_info(tsk);
-       __u64 timer, system;
+       u64 timer, system;
 
        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        account_system_time(tsk, 0, system, system);
+
+       virt_timer_forward(system);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
 void __kprobes vtime_stop_cpu(void)
 {
        struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
-       struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
        unsigned long long idle_time;
        unsigned long psw_mask;
 
        idle->nohz_delay = 0;
 
        /* Call the assembler magic in entry.S */
-       psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
+       psw_idle(idle, psw_mask);
 
        /* Reenable preemption tracer. */
        start_critical_timings();
        /* Account time spent with enabled wait psw loaded as idle time. */
        idle->sequence++;
        smp_wmb();
-       idle_time = idle->idle_exit - idle->idle_enter;
+       idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
+       idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
        idle->idle_time += idle_time;
-       idle->idle_enter = idle->idle_exit = 0ULL;
        idle->idle_count++;
        account_idle_time(idle_time);
        smp_wmb();
        do {
                now = get_clock();
                sequence = ACCESS_ONCE(idle->sequence);
-               idle_enter = ACCESS_ONCE(idle->idle_enter);
-               idle_exit = ACCESS_ONCE(idle->idle_exit);
+               idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+               idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
        } while ((sequence & 1) || (idle->sequence != sequence));
-       return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+       return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
 }
 
 /*
  */
 static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
 {
-       struct vtimer_list *event;
+       struct vtimer_list *tmp;
 
-       list_for_each_entry(event, head, entry) {
-               if (event->expires > timer->expires) {
-                       list_add_tail(&timer->entry, &event->entry);
+       list_for_each_entry(tmp, head, entry) {
+               if (tmp->expires > timer->expires) {
+                       list_add_tail(&timer->entry, &tmp->entry);
                        return;
                }
        }
 }
 
 /*
- * Do the callback functions of expired vtimer events.
- * Called from within the interrupt handler.
- */
-static void do_callbacks(struct list_head *cb_list)
-{
-       struct vtimer_queue *vq;
-       struct vtimer_list *event, *tmp;
-
-       if (list_empty(cb_list))
-               return;
-
-       vq = &__get_cpu_var(virt_cpu_timer);
-
-       list_for_each_entry_safe(event, tmp, cb_list, entry) {
-               list_del_init(&event->entry);
-               (event->function)(event->data);
-               if (event->interval) {
-                       /* Recharge interval timer */
-                       event->expires = event->interval + vq->elapsed;
-                       spin_lock(&vq->lock);
-                       list_add_sorted(event, &vq->list);
-                       spin_unlock(&vq->lock);
-               }
-       }
-}
-
-/*
- * Handler for the virtual CPU timer.
+ * Handler for expired virtual CPU timer.
  */
-static void do_cpu_timer_interrupt(struct ext_code ext_code,
-                                  unsigned int param32, unsigned long param64)
+static void virt_timer_expire(void)
 {
-       struct vtimer_queue *vq;
-       struct vtimer_list *event, *tmp;
-       struct list_head cb_list;       /* the callback queue */
-       __u64 elapsed, next;
-
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
-       INIT_LIST_HEAD(&cb_list);
-       vq = &__get_cpu_var(virt_cpu_timer);
-
-       /* walk timer list, fire all expired events */
-       spin_lock(&vq->lock);
-
-       elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
-       BUG_ON((s64) elapsed < 0);
-       vq->elapsed = 0;
-       list_for_each_entry_safe(event, tmp, &vq->list, entry) {
-               if (event->expires < elapsed)
+       struct vtimer_list *timer, *tmp;
+       unsigned long elapsed;
+       LIST_HEAD(cb_list);
+
+       /* walk timer list, fire all expired timers */
+       spin_lock(&virt_timer_lock);
+       elapsed = atomic64_read(&virt_timer_elapsed);
+       list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
+               if (timer->expires < elapsed)
                        /* move expired timer to the callback queue */
-                       list_move_tail(&event->entry, &cb_list);
+                       list_move_tail(&timer->entry, &cb_list);
                else
-                       event->expires -= elapsed;
+                       timer->expires -= elapsed;
        }
-       spin_unlock(&vq->lock);
-
-       do_callbacks(&cb_list);
-
-       /* next event is first in list */
-       next = VTIMER_MAX_SLICE;
-       spin_lock(&vq->lock);
-       if (!list_empty(&vq->list)) {
-               event = list_first_entry(&vq->list, struct vtimer_list, entry);
-               next = event->expires;
+       if (!list_empty(&virt_timer_list)) {
+               timer = list_first_entry(&virt_timer_list,
+                                        struct vtimer_list, entry);
+               atomic64_set(&virt_timer_current, timer->expires);
+       }
+       atomic64_sub(elapsed, &virt_timer_elapsed);
+       spin_unlock(&virt_timer_lock);
+
+       /* Do callbacks and recharge periodic timers */
+       list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
+               list_del_init(&timer->entry);
+               timer->function(timer->data);
+               if (timer->interval) {
+                       /* Recharge interval timer */
+                       timer->expires = timer->interval +
+                               atomic64_read(&virt_timer_elapsed);
+                       spin_lock(&virt_timer_lock);
+                       list_add_sorted(timer, &virt_timer_list);
+                       spin_unlock(&virt_timer_lock);
+               }
        }
-       spin_unlock(&vq->lock);
-       /*
-        * To improve precision add the time spent by the
-        * interrupt handler to the elapsed time.
-        * Note: CPU timer counts down and we got an interrupt,
-        *       the current content is negative
-        */
-       elapsed = S390_lowcore.async_enter_timer - get_vtimer();
-       set_vtimer(next - elapsed);
-       vq->timer = next - elapsed;
-       vq->elapsed = elapsed;
 }
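To make the sweep concrete (an illustrative trace, not taken from the patch description): suppose virt_timer_elapsed has reached 100 units and two timers are queued with expires of 80 and 150. The first is moved to cb_list and fired, the second is rewritten to 150 - 100 = 50, virt_timer_current is set to 50, and the consumed 100 units are subtracted from virt_timer_elapsed before the callbacks run; a periodic timer that fired is then re-queued at its interval plus whatever has accumulated since.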
 
 void init_virt_timer(struct vtimer_list *timer)
 
 static inline int vtimer_pending(struct vtimer_list *timer)
 {
-       return (!list_empty(&timer->entry));
+       return !list_empty(&timer->entry);
 }
 
-/*
- * this function should only run on the specified CPU
- */
 static void internal_add_vtimer(struct vtimer_list *timer)
 {
-       struct vtimer_queue *vq;
-       unsigned long flags;
-       __u64 left, expires;
-
-       vq = &per_cpu(virt_cpu_timer, timer->cpu);
-       spin_lock_irqsave(&vq->lock, flags);
-
-       BUG_ON(timer->cpu != smp_processor_id());
-
-       if (list_empty(&vq->list)) {
-               /* First timer on this cpu, just program it. */
-               list_add(&timer->entry, &vq->list);
-               set_vtimer(timer->expires);
-               vq->timer = timer->expires;
-               vq->elapsed = 0;
+       if (list_empty(&virt_timer_list)) {
+               /* First timer, just program it. */
+               atomic64_set(&virt_timer_current, timer->expires);
+               atomic64_set(&virt_timer_elapsed, 0);
+               list_add(&timer->entry, &virt_timer_list);
        } else {
-               /* Check progress of old timers. */
-               expires = timer->expires;
-               left = get_vtimer();
-               if (likely((s64) expires < (s64) left)) {
+               /* Update timer against current base. */
+               timer->expires += atomic64_read(&virt_timer_elapsed);
+               if (likely((s64) timer->expires <
+                          (s64) atomic64_read(&virt_timer_current)))
                        /* The new timer expires before the current timer. */
-                       set_vtimer(expires);
-                       vq->elapsed += vq->timer - left;
-                       vq->timer = expires;
-               } else {
-                       vq->elapsed += vq->timer - left;
-                       vq->timer = left;
-               }
-               /* Insert new timer into per cpu list. */
-               timer->expires += vq->elapsed;
-               list_add_sorted(timer, &vq->list);
+                       atomic64_set(&virt_timer_current, timer->expires);
+               /* Insert new timer into the list. */
+               list_add_sorted(timer, &virt_timer_list);
        }
-
-       spin_unlock_irqrestore(&vq->lock, flags);
-       /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
-       put_cpu();
 }
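The rebasing in the else branch matters because virt_timer_elapsed only grows between sweeps: if, for example, 30 units have already accumulated when a timer asking for another 100 units is queued, it is stored with expires = 130, so the elapsed >= current test in virt_timer_forward() still triggers after the requested 100 units (again an illustrative example, not from the patch).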
 
-static inline void prepare_vtimer(struct vtimer_list *timer)
+static void __add_vtimer(struct vtimer_list *timer, int periodic)
 {
-       BUG_ON(!timer->function);
-       BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
-       BUG_ON(vtimer_pending(timer));
-       timer->cpu = get_cpu();
+       unsigned long flags;
+
+       timer->interval = periodic ? timer->expires : 0;
+       spin_lock_irqsave(&virt_timer_lock, flags);
+       internal_add_vtimer(timer);
+       spin_unlock_irqrestore(&virt_timer_lock, flags);
 }
 
 /*
  * add_virt_timer - add a one-shot virtual CPU timer
  */
-void add_virt_timer(void *new)
+void add_virt_timer(struct vtimer_list *timer)
 {
-       struct vtimer_list *timer;
-
-       timer = (struct vtimer_list *)new;
-       prepare_vtimer(timer);
-       timer->interval = 0;
-       internal_add_vtimer(timer);
+       __add_vtimer(timer, 0);
 }
 EXPORT_SYMBOL(add_virt_timer);
 
 /*
  * add_virt_timer_periodic - add an interval virtual CPU timer
  */
-void add_virt_timer_periodic(void *new)
+void add_virt_timer_periodic(struct vtimer_list *timer)
 {
-       struct vtimer_list *timer;
-
-       timer = (struct vtimer_list *)new;
-       prepare_vtimer(timer);
-       timer->interval = timer->expires;
-       internal_add_vtimer(timer);
+       __add_vtimer(timer, 1);
 }
 EXPORT_SYMBOL(add_virt_timer_periodic);
 
-static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
+static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
 {
-       struct vtimer_queue *vq;
        unsigned long flags;
-       int cpu;
+       int rc;
 
        BUG_ON(!timer->function);
-       BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
 
        if (timer->expires == expires && vtimer_pending(timer))
                return 1;
-
-       cpu = get_cpu();
-       vq = &per_cpu(virt_cpu_timer, cpu);
-
-       /* disable interrupts before test if timer is pending */
-       spin_lock_irqsave(&vq->lock, flags);
-
-       /* if timer isn't pending add it on the current CPU */
-       if (!vtimer_pending(timer)) {
-               spin_unlock_irqrestore(&vq->lock, flags);
-
-               if (periodic)
-                       timer->interval = expires;
-               else
-                       timer->interval = 0;
-               timer->expires = expires;
-               timer->cpu = cpu;
-               internal_add_vtimer(timer);
-               return 0;
-       }
-
-       /* check if we run on the right CPU */
-       BUG_ON(timer->cpu != cpu);
-
-       list_del_init(&timer->entry);
+       spin_lock_irqsave(&virt_timer_lock, flags);
+       rc = vtimer_pending(timer);
+       if (rc)
+               list_del_init(&timer->entry);
+       timer->interval = periodic ? expires : 0;
        timer->expires = expires;
-       if (periodic)
-               timer->interval = expires;
-
-       /* the timer can't expire anymore so we can release the lock */
-       spin_unlock_irqrestore(&vq->lock, flags);
        internal_add_vtimer(timer);
-       return 1;
+       spin_unlock_irqrestore(&virt_timer_lock, flags);
+       return rc;
 }
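With the single global list, mod_virt_timer()/mod_virt_timer_periodic() may be called from any CPU; the return value only reports whether a pending timer was re-programmed. A small hedged usage sketch (example_retrigger is hypothetical):

        static void example_retrigger(struct vtimer_list *timer, u64 new_expires)
        {
                if (mod_virt_timer_periodic(timer, new_expires))
                        pr_debug("re-programmed a pending timer\n");
                else
                        pr_debug("armed a previously idle timer\n");
        }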
 
 /*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
  * returns whether it has modified a pending timer (1) or not (0)
  */
-int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer(struct vtimer_list *timer, u64 expires)
 {
        return __mod_vtimer(timer, expires, 0);
 }
 EXPORT_SYMBOL(mod_virt_timer);
 
 /*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
  * returns whether it has modified a pending timer (1) or not (0)
  */
-int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
 {
        return __mod_vtimer(timer, expires, 1);
 }
 EXPORT_SYMBOL(mod_virt_timer_periodic);
 
 /*
- * delete a virtual timer
+ * Delete a virtual timer.
  *
  * returns whether the deleted timer was pending (1) or not (0)
  */
 int del_virt_timer(struct vtimer_list *timer)
 {
        unsigned long flags;
-       struct vtimer_queue *vq;
 
-       /* check if timer is pending */
        if (!vtimer_pending(timer))
                return 0;
-
-       vq = &per_cpu(virt_cpu_timer, timer->cpu);
-       spin_lock_irqsave(&vq->lock, flags);
-
-       /* we don't interrupt a running timer, just let it expire! */
+       spin_lock_irqsave(&virt_timer_lock, flags);
        list_del_init(&timer->entry);
-
-       spin_unlock_irqrestore(&vq->lock, flags);
+       spin_unlock_irqrestore(&virt_timer_lock, flags);
        return 1;
 }
 EXPORT_SYMBOL(del_virt_timer);
 /*
  * Start the virtual CPU timer on the current CPU.
  */
-void init_cpu_vtimer(void)
+void __cpuinit init_cpu_vtimer(void)
 {
-       struct vtimer_queue *vq;
-
-       /* initialize per cpu vtimer structure */
-       vq = &__get_cpu_var(virt_cpu_timer);
-       INIT_LIST_HEAD(&vq->list);
-       spin_lock_init(&vq->lock);
-
-       /* enable cpu timer interrupts */
-       __ctl_set_bit(0,10);
-
        /* set initial cpu timer */
-       set_vtimer(0x7fffffffffffffffULL);
+       set_vtimer(VTIMER_MAX_SLICE);
 }
 
 static int __cpuinit s390_nohz_notify(struct notifier_block *self,
 
 void __init vtime_init(void)
 {
-       /* request the cpu timer external interrupt */
-       if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
-               panic("Couldn't request external interrupt 0x1005");
-
        /* Enable cpu timer interrupts on the boot cpu. */
        init_cpu_vtimer();
        cpu_notifier(s390_nohz_notify, 0);
 }
-