 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
+extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
+
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern void __init_stage2_translation(void);
 
 #define cntvoff_el2                    CNTVOFF
 #define cnthctl_el2                    CNTHCTL
 
-void __timer_save_state(struct kvm_vcpu *vcpu);
-void __timer_restore_state(struct kvm_vcpu *vcpu);
+void __timer_enable_traps(struct kvm_vcpu *vcpu);
+void __timer_disable_traps(struct kvm_vcpu *vcpu);
 
 void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 
        __activate_vm(vcpu);
 
        __vgic_restore_state(vcpu);
-       __timer_restore_state(vcpu);
+       __timer_enable_traps(vcpu);
 
        __sysreg_restore_state(guest_ctxt);
        __banked_restore_state(guest_ctxt);
 
        __banked_save_state(guest_ctxt);
        __sysreg_save_state(guest_ctxt);
-       __timer_save_state(vcpu);
+       __timer_disable_traps(vcpu);
+
        __vgic_save_state(vcpu);
 
        __deactivate_traps(vcpu);
 
                vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
                host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
-               __timer_save_state(vcpu);
+               __timer_disable_traps(vcpu);
                __deactivate_traps(vcpu);
                __deactivate_vm(vcpu);
                __banked_restore_state(host_ctxt);
 
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
+extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
+
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
 
 void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
-void __timer_save_state(struct kvm_vcpu *vcpu);
-void __timer_restore_state(struct kvm_vcpu *vcpu);
+void __timer_enable_traps(struct kvm_vcpu *vcpu);
+void __timer_disable_traps(struct kvm_vcpu *vcpu);
 
 void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
 
        __activate_vm(vcpu);
 
        __vgic_restore_state(vcpu);
-       __timer_restore_state(vcpu);
+       __timer_enable_traps(vcpu);
 
        /*
         * We must restore the 32-bit state before the sysregs, thanks
 
        __sysreg_save_guest_state(guest_ctxt);
        __sysreg32_save_state(vcpu);
-       __timer_save_state(vcpu);
+       __timer_disable_traps(vcpu);
        __vgic_save_state(vcpu);
 
        __deactivate_traps(vcpu);
 
                vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
                host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
-               __timer_save_state(vcpu);
+               __timer_disable_traps(vcpu);
                __deactivate_traps(vcpu);
                __deactivate_vm(vcpu);
                __sysreg_restore_host_state(host_ctxt);
 
        soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(timer_ctx));
 }
 
+static void timer_save_state(struct kvm_vcpu *vcpu)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+       if (timer->enabled) {
+               vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+               vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+       }
+
+       /* Disable the virtual timer */
+       write_sysreg_el0(0, cntv_ctl);
+}
+
 /*
  * Schedule the background timer before calling kvm_vcpu_block, so that this
  * thread is removed from its waitqueue and made runnable when there's a timer
        soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
 }
 
+static void timer_restore_state(struct kvm_vcpu *vcpu)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+       if (timer->enabled) {
+               write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
+               isb();
+               write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
+       }
+}
+
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        soft_timer_cancel(&timer->bg_timer, &timer->expired);
 }
 
+static void set_cntvoff(u64 cntvoff)
+{
+       u32 low = lower_32_bits(cntvoff);
+       u32 high = upper_32_bits(cntvoff);
+
+       /*
+        * Since kvm_call_hyp doesn't fully follow the ARM PCS, especially on
+        * 32-bit systems, but instead passes arguments register by register
+        * shifted one place (the function address goes in r0/x0), we cannot
+        * simply pass a 64-bit value as an argument and instead have to split
+        * it into two 32-bit halves.
+        */
+       kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
+}
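
For illustration only (not part of the patch): the low/high split described in the comment above is the plain 64-bit decomposition, reassembled on the hyp side exactly as __kvm_timer_set_cntvoff does further down. A minimal standalone sketch, using local stand-ins for the kernel's lower_32_bits()/upper_32_bits() helpers:

#include <assert.h>
#include <stdint.h>

/* Stand-ins for the kernel's lower_32_bits()/upper_32_bits() helpers. */
#define lower_32_bits(v)  ((uint32_t)(v))
#define upper_32_bits(v)  ((uint32_t)((uint64_t)(v) >> 32))

int main(void)
{
        uint64_t cntvoff = 0x0123456789abcdefULL;  /* arbitrary example offset */

        /* Caller side (set_cntvoff): split into two register-sized arguments. */
        uint32_t low = lower_32_bits(cntvoff);
        uint32_t high = upper_32_bits(cntvoff);

        /* Hyp side (__kvm_timer_set_cntvoff): reassemble before writing CNTVOFF_EL2. */
        uint64_t back = (uint64_t)high << 32 | low;

        assert(back == cntvoff);
        return 0;
}
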
+
 static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
        if (unlikely(!timer->enabled))
                return;
                kvm_timer_flush_hwstate_user(vcpu);
        else
                kvm_timer_flush_hwstate_vgic(vcpu);
+
+       set_cntvoff(vtimer->cntvoff);
+       timer_restore_state(vcpu);
 }
 
 /**
         */
        soft_timer_cancel(&timer->phys_timer, NULL);
 
+       timer_save_state(vcpu);
+       set_cntvoff(0);
+
        /*
         * The guest could have modified the timer registers or the timer
         * could have expired, update the timer state.
 
 
 #include <asm/kvm_hyp.h>
 
-/* vcpu is already in the HYP VA space */
-void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
+void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
+{
+       u64 cntvoff = (u64)cntvoff_high << 32 | cntvoff_low;
+       write_sysreg(cntvoff, cntvoff_el2);
+}
+
+void __hyp_text enable_el1_phys_timer_access(void)
 {
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        u64 val;
 
-       if (timer->enabled) {
-               vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-               vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
-       }
+       /* Allow physical timer/counter access for the host */
+       val = read_sysreg(cnthctl_el2);
+       val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+       write_sysreg(val, cnthctl_el2);
+}
 
-       /* Disable the virtual timer */
-       write_sysreg_el0(0, cntv_ctl);
+void __hyp_text disable_el1_phys_timer_access(void)
+{
+       u64 val;
 
+       /*
+        * Disallow physical timer access for the guest, but keep physical
+        * counter access allowed.
+        */
+       val = read_sysreg(cnthctl_el2);
+       val &= ~CNTHCTL_EL1PCEN;
+       val |= CNTHCTL_EL1PCTEN;
+       write_sysreg(val, cnthctl_el2);
+}
+
+void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
+{
        /*
         * We don't need to do this for VHE since the host kernel runs in EL2
         * with HCR_EL2.TGE == 1, which makes those bits have no impact.
         */
-       if (!has_vhe()) {
-               /* Allow physical timer/counter access for the host */
-               val = read_sysreg(cnthctl_el2);
-               val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-               write_sysreg(val, cnthctl_el2);
-       }
-
-       /* Clear cntvoff for the host */
-       write_sysreg(0, cntvoff_el2);
+       if (!has_vhe())
+               enable_el1_phys_timer_access();
 }
 
-void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
+void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-       u64 val;
-
-       /* Those bits are already configured at boot on VHE-system */
-       if (!has_vhe()) {
-               /*
-                * Disallow physical timer access for the guest
-                * Physical counter access is allowed
-                */
-               val = read_sysreg(cnthctl_el2);
-               val &= ~CNTHCTL_EL1PCEN;
-               val |= CNTHCTL_EL1PCTEN;
-               write_sysreg(val, cnthctl_el2);
-       }
-
-       if (timer->enabled) {
-               write_sysreg(vtimer->cntvoff, cntvoff_el2);
-               write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
-               isb();
-               write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
-       }
+       if (!has_vhe())
+               disable_el1_phys_timer_access();
 }
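
To make the overall restructuring easier to follow, here is a rough sketch (illustration only, not part of the patch) of the per-run ordering on a non-VHE system after this change: the host timer code now saves/restores the virtual timer state and programs CNTVOFF around the world switch, while the hyp code only toggles the physical timer traps. The stub names below are invented for the sketch, and it assumes the run loop calls kvm_timer_flush_hwstate before __kvm_vcpu_run and the matching sync path after it, as in the existing arch timer code:

/* Illustrative stubs; the real definitions are in the hunks above. */
struct kvm_vcpu;

static void set_cntvoff_guest(struct kvm_vcpu *vcpu) { }   /* set_cntvoff(vtimer->cntvoff) */
static void set_cntvoff_host(void) { }                     /* set_cntvoff(0) */
static void timer_restore_state(struct kvm_vcpu *vcpu) { } /* program CNTV_CVAL/CNTV_CTL */
static void timer_save_state(struct kvm_vcpu *vcpu) { }    /* save CNTV_CTL/CNTV_CVAL, disable vtimer */
static void __timer_enable_traps(struct kvm_vcpu *vcpu) { }  /* clear CNTHCTL_EL1PCEN for the guest */
static void __timer_disable_traps(struct kvm_vcpu *vcpu) { } /* set EL1PCTEN | EL1PCEN for the host */

static void run_guest_once(struct kvm_vcpu *vcpu)
{
        /* Flush path, on the host: load the guest's virtual timer state. */
        set_cntvoff_guest(vcpu);
        timer_restore_state(vcpu);

        /* World switch, in hyp: only the trap configuration changes here now. */
        __timer_enable_traps(vcpu);
        /* ... guest runs ... */
        __timer_disable_traps(vcpu);

        /* Sync path, back on the host: pull the guest state out again. */
        timer_save_state(vcpu);
        set_cntvoff_host();
}
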