 #include <linux/clocksource.h>
 #include <linux/hrtimer.h>
-#include <linux/workqueue.h>
 
 struct arch_timer_context {
        /* Registers: control register, timer value */
        /* Background timer used when the guest is not running */
        struct hrtimer                  bg_timer;
 
-       /* Work queued with the above timer expires */
-       struct work_struct              expired;
-
        /* Physical timer emulation */
        struct hrtimer                  phys_timer;
 
 
                      HRTIMER_MODE_ABS);
 }
 
-static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
+static void soft_timer_cancel(struct hrtimer *hrt)
 {
        hrtimer_cancel(hrt);
-       if (work)
-               cancel_work_sync(work);
 }
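
For orientation only (not part of this diff): the hunk further up shows just the tail of the companion soft_timer_start() helper. Assuming it keeps the shape implied by the surviving HRTIMER_MODE_ABS argument, a minimal sketch of that helper after this change would read:

	/* Sketch only: program an absolute-time hrtimer 'ns' nanoseconds from now. */
	static void soft_timer_start(struct hrtimer *hrt, u64 ns)
	{
		hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
			      HRTIMER_MODE_ABS);
	}

With the work item gone, cancelling a soft timer is a plain hrtimer_cancel(); there is no deferred work left to flush with cancel_work_sync().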
 
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/*
- * Work function for handling the backup timer that we schedule when a vcpu is
- * no longer running, but had a timer programmed to fire in the future.
- */
-static void kvm_timer_inject_irq_work(struct work_struct *work)
-{
-       struct kvm_vcpu *vcpu;
-
-       vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-
-       /*
-        * If the vcpu is blocked we want to wake it up so that it will see
-        * the timer has expired when entering the guest.
-        */
-       kvm_vcpu_wake_up(vcpu);
-}
-
 static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 {
        u64 cval, now;
                return HRTIMER_RESTART;
        }
 
-       schedule_work(&timer->expired);
+       kvm_vcpu_wake_up(vcpu);
        return HRTIMER_NORESTART;
 }
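
The replacement above leans on the fact that waking a vcpu does not need process context: the wake-up path used by kvm_vcpu_wake_up() is interrupt-safe, so it can be called straight from the hrtimer callback (hard interrupt context) and the intermediate work item becomes unnecessary. A self-contained sketch of the same pattern, with made-up demo_* names that are not part of this patch:

	/*
	 * Standalone sketch (hypothetical names): an hrtimer callback runs in
	 * hard interrupt context, and waking a blocked task from there is
	 * legal, so no workqueue bounce is required.
	 */
	#include <linux/hrtimer.h>
	#include <linux/ktime.h>
	#include <linux/sched.h>

	static struct hrtimer demo_timer;
	static struct task_struct *demo_waiter;	/* task sleeping until expiry */

	static enum hrtimer_restart demo_expire(struct hrtimer *hrt)
	{
		/* wake_up_process() may be called from IRQ context */
		if (demo_waiter)
			wake_up_process(demo_waiter);
		return HRTIMER_NORESTART;
	}

	static void demo_arm(u64 ns)
	{
		hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		demo_timer.function = demo_expire;
		hrtimer_start(&demo_timer, ktime_add_ns(ktime_get(), ns),
			      HRTIMER_MODE_ABS);
	}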
 
         * then we also don't need a soft timer.
         */
        if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
-               soft_timer_cancel(&timer->phys_timer, NULL);
+               soft_timer_cancel(&timer->phys_timer);
                return;
        }
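
The tail of this function is elided by the hunk; based on the helpers visible elsewhere in the patch, the fall-through case presumably arms the soft timer for the computed expiry, roughly along the lines of the reconstructed line below (an assumption, not taken from this hunk):

	/* Reconstruction: arm the emulated physical timer for the remaining delta. */
	soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(ptimer));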
 
 
        vtimer_restore_state(vcpu);
 
-       soft_timer_cancel(&timer->bg_timer, &timer->expired);
+       soft_timer_cancel(&timer->bg_timer);
 }
 
 static void set_cntvoff(u64 cntvoff)
         * In any case, we re-schedule the hrtimer for the physical timer when
         * coming back to the VCPU thread in kvm_timer_vcpu_load().
         */
-       soft_timer_cancel(&timer->phys_timer, NULL);
+       soft_timer_cancel(&timer->phys_timer);
 
        /*
         * The kernel may decide to run userspace after calling vcpu_put, so
        update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
        vcpu_ptimer(vcpu)->cntvoff = 0;
 
-       INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->bg_timer.function = kvm_bg_timer_expire;
 
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-       soft_timer_cancel(&timer->bg_timer, &timer->expired);
-       soft_timer_cancel(&timer->phys_timer, NULL);
+       soft_timer_cancel(&timer->bg_timer);
+       soft_timer_cancel(&timer->phys_timer);
        kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
 }