void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.timer_cpu.active_cleared_last = false;
+       vcpu_vtimer(vcpu)->active_cleared_last = false;
 }
 
 static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
 {
        u64 cval, now;
 
-       cval = vcpu->arch.timer_cpu.cntv_cval;
+       cval = vcpu_vtimer(vcpu)->cnt_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
 
        if (now < cval) {
 
 static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
 {
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-       return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-               (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
+       return !(vtimer->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+               (vtimer->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
 }
 
 bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
 {
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        u64 cval, now;
 
        if (!kvm_timer_irq_can_fire(vcpu))
                return false;
 
-       cval = timer->cntv_cval;
+       cval = vtimer->cnt_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
 
        return cval <= now;
 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
 {
        int ret;
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
        BUG_ON(!vgic_initialized(vcpu->kvm));
 
-       timer->active_cleared_last = false;
-       timer->irq.level = new_level;
-       trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq,
-                                  timer->irq.level);
+       vtimer->active_cleared_last = false;
+       vtimer->irq.level = new_level;
+       trace_kvm_timer_update_irq(vcpu->vcpu_id, vtimer->irq.irq,
+                                  vtimer->irq.level);
 
        ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-                                        timer->irq.irq,
-                                        timer->irq.level);
+                                        vtimer->irq.irq,
+                                        vtimer->irq.level);
        WARN_ON(ret);
 }
 
 static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
        /*
         * If userspace modified the timer registers via SET_ONE_REG before
-        * the vgic was initialized, we mustn't set the timer->irq.level value
+        * the vgic was initialized, we mustn't set the vtimer->irq.level value
         * because the guest would never see the interrupt.  Instead wait
         * until we call this function from kvm_timer_flush_hwstate.
         */
        if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
                return -ENODEV;
 
-       if (kvm_timer_should_fire(vcpu) != timer->irq.level)
-               kvm_timer_update_irq(vcpu, !timer->irq.level);
+       if (kvm_timer_should_fire(vcpu) != vtimer->irq.level)
+               kvm_timer_update_irq(vcpu, !vtimer->irq.level);
 
        return 0;
 }
  */
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 {
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        bool phys_active;
        int ret;
 
        * to ensure that hardware interrupts from the timer triggers a guest
        * exit.
        */
-       phys_active = timer->irq.level ||
-                       kvm_vgic_map_is_active(vcpu, timer->irq.irq);
+       phys_active = vtimer->irq.level ||
+                       kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
 
        /*
         * We want to avoid hitting the (re)distributor as much as
         * - cached value is "active clear"
         * - value to be programmed is "active clear"
         */
-       if (timer->active_cleared_last && !phys_active)
+       if (vtimer->active_cleared_last && !phys_active)
                return;
 
        ret = irq_set_irqchip_state(host_vtimer_irq,
                                    IRQCHIP_STATE_ACTIVE,
                                    phys_active);
        WARN_ON(ret);
 
-       timer->active_cleared_last = !phys_active;
+       vtimer->active_cleared_last = !phys_active;
 }
 
 /**
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq)
 {
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
        /*
         * The vcpu timer irq number cannot be determined in
         * kvm_vcpu_set_target(). To handle this, we determine
         * vcpu timer irq number when the vcpu is reset.
         */
-       timer->irq.irq = irq->irq;
+       vtimer->irq.irq = irq->irq;
 
        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
-       timer->cntv_ctl = 0;
+       vtimer->cnt_ctl = 0;
        kvm_timer_update_state(vcpu);
 
        return 0;
 
 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 {
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
-               timer->cntv_ctl = value;
+               vtimer->cnt_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
                break;
        case KVM_REG_ARM_TIMER_CVAL:
-               timer->cntv_cval = value;
+               vtimer->cnt_cval = value;
                break;
        default:
                return -1;
 
 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
 {
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
-               return timer->cntv_ctl;
+               return vtimer->cnt_ctl;
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
-               return timer->cntv_cval;
+               return vtimer->cnt_cval;
        }
        return (u64)-1;
 }
 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
        timer_disarm(timer);
-       kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
+       kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
 }
 
 int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct irq_desc *desc;
        struct irq_data *data;
        int phys_irq;
         * Tell the VGIC that the virtual interrupt is tied to a
         * physical interrupt. We do that once per VCPU.
         */
-       ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
+       ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
        if (ret)
                return ret;